Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/CodeGen/AccelTable.h b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
new file mode 100644
index 0000000..ec850a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AccelTable.h
@@ -0,0 +1,373 @@
+//==- include/llvm/CodeGen/AccelTable.h - Accelerator Tables -----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing accelerator tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFACCELTABLE_H
+#define LLVM_CODEGEN_DWARFACCELTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/DIE.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DJB.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+/// The DWARF and Apple accelerator tables are an indirect hash table optimized
+/// for null lookup rather than access to known data. The Apple accelerator
+/// tables are a precursor of the newer DWARF v5 accelerator tables. Both
+/// formats share common design ideas.
+///
+/// The Apple accelerator tables are output into an on-disk format that looks
+/// like this:
+///
+/// .------------------.
+/// |  HEADER          |
+/// |------------------|
+/// |  BUCKETS         |
+/// |------------------|
+/// |  HASHES          |
+/// |------------------|
+/// |  OFFSETS         |
+/// |------------------|
+/// |  DATA            |
+/// `------------------'
+///
+/// The header contains a magic number, version, type of hash function,
+/// the number of buckets, total number of hashes, and room for a special struct
+/// of data and the length of that struct.
+///
+/// The buckets contain an index (e.g. 6) into the hashes array. The hashes
+/// section contains all of the 32-bit hash values in contiguous memory, and the
+/// offsets contain the offset into the data area for the particular hash.
+///
+/// For a lookup example, we could hash a function name and take it modulo the
+/// number of buckets giving us our bucket. From there we take the bucket value
+/// as an index into the hashes table and look at each successive hash as long
+/// as the hash value is still the same modulo result (bucket value) as earlier.
+/// If we have a match we look at that same entry in the offsets table and grab
+/// the offset in the data for our final match.
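+///
+/// A sketch of that lookup in C++ (illustrative only; Header, Buckets, Hashes
+/// and Offsets stand for the four on-disk sections and are not declared in
+/// this file):
+///
+/// \code
+///   uint32_t Hash = djbHash(Name);
+///   for (uint32_t I = Buckets[Hash % Header.BucketCount];
+///        I < Header.HashCount &&
+///        Hashes[I] % Header.BucketCount == Hash % Header.BucketCount;
+///        ++I)
+///     if (Hashes[I] == Hash)
+///       return Offsets[I]; // Offset of the matching entry in the data area.
+/// \endcode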
+///
+/// The DWARF v5 accelerator table consists of zero or more name indices that
+/// are output into an on-disk format that looks like this:
+///
+/// .------------------.
+/// |  HEADER          |
+/// |------------------|
+/// |  CU LIST         |
+/// |------------------|
+/// |  LOCAL TU LIST   |
+/// |------------------|
+/// |  FOREIGN TU LIST |
+/// |------------------|
+/// |  HASH TABLE      |
+/// |------------------|
+/// |  NAME TABLE      |
+/// |------------------|
+/// |  ABBREV TABLE    |
+/// |------------------|
+/// |  ENTRY POOL      |
+/// `------------------'
+///
+/// For the full documentation please refer to the DWARF 5 standard.
+///
+///
+/// This file defines the class template AccelTable, which represents an
+/// abstract view of an Accelerator table, without any notion of an on-disk
+/// layout. This class is parameterized by an entry type, which should derive
+/// from AccelTableData. This is the type of individual entries in the table,
+/// and it should store the data necessary to emit them. AppleAccelTableData is
+/// the base class for Apple Accelerator Table entries, which have a uniform
+/// structure based on a sequence of Atoms. There are different sub-classes
+/// derived from AppleAccelTableData, which differ in the set of Atoms and how
+/// they obtain their values.
+///
+/// An Apple Accelerator Table can be serialized by calling the
+/// emitAppleAccelTable function.
+///
+/// TODO: Add DWARF v5 emission code.
+
+namespace llvm {
+
+class AsmPrinter;
+
+/// Interface to which the different types of accelerator table data must
+/// conform. It serves as a base class for different values of the template
+/// argument of the AccelTable class template.
+class AccelTableData {
+public:
+  virtual ~AccelTableData() = default;
+
+  bool operator<(const AccelTableData &Other) const {
+    return order() < Other.order();
+  }
+
+  // Subclasses should implement:
+  // static uint32_t hash(StringRef Name);
+
+#ifndef NDEBUG
+  virtual void print(raw_ostream &OS) const = 0;
+#endif
+protected:
+  virtual uint64_t order() const = 0;
+};
+
+/// A base class holding non-template-dependent functionality of the AccelTable
+/// class. Clients should not use this class directly but rather instantiate
+/// AccelTable with a type derived from AccelTableData.
+class AccelTableBase {
+public:
+  using HashFn = uint32_t(StringRef);
+
+  /// Represents a group of entries with identical name (and hence, hash value).
+  struct HashData {
+    DwarfStringPoolEntryRef Name;
+    uint32_t HashValue;
+    std::vector<AccelTableData *> Values;
+    MCSymbol *Sym;
+
+    HashData(DwarfStringPoolEntryRef Name, HashFn *Hash)
+        : Name(Name), HashValue(Hash(Name.getString())) {}
+
+#ifndef NDEBUG
+    void print(raw_ostream &OS) const;
+    void dump() const { print(dbgs()); }
+#endif
+  };
+  using HashList = std::vector<HashData *>;
+  using BucketList = std::vector<HashList>;
+
+protected:
+  /// Allocator for HashData and Values.
+  BumpPtrAllocator Allocator;
+
+  using StringEntries = StringMap<HashData, BumpPtrAllocator &>;
+  StringEntries Entries;
+
+  HashFn *Hash;
+  uint32_t BucketCount;
+  uint32_t UniqueHashCount;
+
+  HashList Hashes;
+  BucketList Buckets;
+
+  void computeBucketCount();
+
+  AccelTableBase(HashFn *Hash) : Entries(Allocator), Hash(Hash) {}
+
+public:
+  void finalize(AsmPrinter *Asm, StringRef Prefix);
+  ArrayRef<HashList> getBuckets() const { return Buckets; }
+  uint32_t getBucketCount() const { return BucketCount; }
+  uint32_t getUniqueHashCount() const { return UniqueHashCount; }
+  uint32_t getUniqueNameCount() const { return Entries.size(); }
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const;
+  void dump() const { print(dbgs()); }
+#endif
+
+  AccelTableBase(const AccelTableBase &) = delete;
+  void operator=(const AccelTableBase &) = delete;
+};
+
+/// This class holds an abstract representation of an Accelerator Table,
+/// consisting of a sequence of buckets, each bucket containing a sequence of
+/// HashData entries. The class is parameterized by the type of entries it
+/// holds. The type template parameter also defines the hash function to use for
+/// hashing names.
+template <typename DataT> class AccelTable : public AccelTableBase {
+public:
+  AccelTable() : AccelTableBase(DataT::hash) {}
+
+  template <typename... Types>
+  void addName(DwarfStringPoolEntryRef Name, Types &&... Args);
+};
+
+template <typename AccelTableDataT>
+template <typename... Types>
+void AccelTable<AccelTableDataT>::addName(DwarfStringPoolEntryRef Name,
+                                          Types &&... Args) {
+  assert(Buckets.empty() && "Already finalized!");
+  // If the string is already in the map, add this DIE to its entry;
+  // otherwise create a new entry.
+  auto Iter = Entries.try_emplace(Name.getString(), Name, Hash).first;
+  assert(Iter->second.Name == Name);
+  Iter->second.Values.push_back(
+      new (Allocator) AccelTableDataT(std::forward<Types>(Args)...));
+}
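+
+// Illustrative usage of the templates above, using the Apple table types
+// declared further down in this file. StringEntry, Die, Asm and SecBegin
+// stand for values supplied by the DWARF emitter; this is a sketch, not a
+// prescribed sequence:
+//
+//   AccelTable<AppleAccelTableOffsetData> Table;
+//   Table.addName(StringEntry, Die); // Repeated for every name.
+//   Table.finalize(Asm, "names");    // Computes buckets and hashes.
+//   emitAppleAccelTable(Asm, Table, "names", SecBegin);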
+
+/// A base class for different implementations of Data classes for Apple
+/// Accelerator Tables. The columns in the table are defined by the static Atoms
+/// variable defined on the subclasses.
+class AppleAccelTableData : public AccelTableData {
+public:
+  /// An Atom defines the form of the data in an Apple accelerator table.
+  /// Conceptually it is a column in the accelerator consisting of a type and a
+  /// specification of the form of its data.
+  struct Atom {
+    /// Atom Type.
+    const uint16_t Type;
+    /// DWARF Form.
+    const uint16_t Form;
+
+    constexpr Atom(uint16_t Type, uint16_t Form) : Type(Type), Form(Form) {}
+
+#ifndef NDEBUG
+    void print(raw_ostream &OS) const;
+    void dump() const { print(dbgs()); }
+#endif
+  };
+  // Subclasses should define:
+  // static constexpr Atom Atoms[];
+
+  virtual void emit(AsmPrinter *Asm) const = 0;
+
+  static uint32_t hash(StringRef Buffer) { return djbHash(Buffer); }
+};
+
+void emitAppleAccelTableImpl(AsmPrinter *Asm, AccelTableBase &Contents,
+                             StringRef Prefix, const MCSymbol *SecBegin,
+                             ArrayRef<AppleAccelTableData::Atom> Atoms);
+
+/// Emit an Apple Accelerator Table consisting of entries in the specified
+/// AccelTable. The DataT template parameter should be derived from
+/// AppleAccelTableData.
+template <typename DataT>
+void emitAppleAccelTable(AsmPrinter *Asm, AccelTable<DataT> &Contents,
+                         StringRef Prefix, const MCSymbol *SecBegin) {
+  static_assert(std::is_convertible<DataT *, AppleAccelTableData *>::value, "");
+  emitAppleAccelTableImpl(Asm, Contents, Prefix, SecBegin, DataT::Atoms);
+}
+
+/// Accelerator table data implementation for simple Apple accelerator tables
+/// with just a DIE reference.
+class AppleAccelTableOffsetData : public AppleAccelTableData {
+public:
+  AppleAccelTableOffsetData(const DIE *D) : Die(D) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Die->getOffset(); }
+
+  const DIE *Die;
+};
+
+/// Accelerator table data implementation for Apple type accelerator tables.
+class AppleAccelTableTypeData : public AppleAccelTableOffsetData {
+public:
+  AppleAccelTableTypeData(const DIE *D) : AppleAccelTableOffsetData(D) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
+      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
+      Atom(dwarf::DW_ATOM_type_flags, dwarf::DW_FORM_data1)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+};
+
+/// Accelerator table data implementation for simple Apple accelerator tables
+/// with a DIE offset but no actual DIE pointer.
+class AppleAccelTableStaticOffsetData : public AppleAccelTableData {
+public:
+  AppleAccelTableStaticOffsetData(uint32_t Offset) : Offset(Offset) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Offset; }
+
+  uint32_t Offset;
+};
+
+/// Accelerator table data implementation for type accelerator tables with
+/// a DIE offset but no actual DIE pointer.
+class AppleAccelTableStaticTypeData : public AppleAccelTableStaticOffsetData {
+public:
+  AppleAccelTableStaticTypeData(uint32_t Offset, uint16_t Tag,
+                                bool ObjCClassIsImplementation,
+                                uint32_t QualifiedNameHash)
+      : AppleAccelTableStaticOffsetData(Offset),
+        QualifiedNameHash(QualifiedNameHash), Tag(Tag),
+        ObjCClassIsImplementation(ObjCClassIsImplementation) {}
+
+  void emit(AsmPrinter *Asm) const override;
+
+#ifndef _MSC_VER
+  // The line below is rejected by older versions (TBD) of MSVC.
+  static constexpr Atom Atoms[] = {
+      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
+      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
+      Atom(5, dwarf::DW_FORM_data1), Atom(6, dwarf::DW_FORM_data4)};
+#else
+  // FIXME: Erase this path once the minimum MSVC version has been bumped.
+  static const SmallVector<Atom, 4> Atoms;
+#endif
+
+#ifndef NDEBUG
+  void print(raw_ostream &OS) const override;
+#endif
+protected:
+  uint64_t order() const override { return Offset; }
+
+  uint32_t QualifiedNameHash;
+  uint16_t Tag;
+  bool ObjCClassIsImplementation;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DWARFACCELTABLE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/Analysis.h b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
new file mode 100644
index 0000000..ba88f1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/Analysis.h
@@ -0,0 +1,131 @@
+//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares several CodeGen-specific LLVM IR analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ANALYSIS_H
+#define LLVM_CODEGEN_ANALYSIS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+class GlobalValue;
+class MachineBasicBlock;
+class MachineFunction;
+class TargetLoweringBase;
+class TargetLowering;
+class TargetMachine;
+class SDNode;
+class SDValue;
+class SelectionDAG;
+struct EVT;
+
+/// \brief Compute the linearized index of a member in a nested
+/// aggregate/struct/array.
+///
+/// Given an LLVM IR aggregate type and a sequence of insertvalue or
+/// extractvalue indices that identify a member, return the linearized index of
+/// the start of the member, i.e. the number of elements in memory before the
+/// sought one. This is distinct from the offset in bytes.
+///
+/// \param Ty is the type indexed by \p Indices.
+/// \param Indices is an optional pointer in the indices list to the current
+/// index.
+/// \param IndicesEnd is the end of the indices list.
+/// \param CurIndex is the current index in the recursion.
+///
+/// \returns \p CurIndex plus the linear index in \p Ty given by the indices
+/// list.
+unsigned ComputeLinearIndex(Type *Ty,
+                            const unsigned *Indices,
+                            const unsigned *IndicesEnd,
+                            unsigned CurIndex = 0);
+
+inline unsigned ComputeLinearIndex(Type *Ty,
+                                   ArrayRef<unsigned> Indices,
+                                   unsigned CurIndex = 0) {
+  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
+}
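+
+// For example (illustrative): for the aggregate type
+//   { i32, { i32, i32 }, i32 }
+// the indices {1, 1} name the second field of the nested struct, and
+// ComputeLinearIndex(Ty, {1, 1}) returns 2, since the outer i32 at {0} and
+// the nested i32 at {1, 0} precede it in memory.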
+
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// EVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
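+///
+/// For example (a sketch; the exact EVTs depend on the target's DataLayout),
+/// the IR type { i32, [2 x float] } would typically yield ValueVTs of
+/// {i32, f32, f32} with Offsets of {0, 4, 8}.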
+///
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
+                     uint64_t StartingOffset = 0);
+
+/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
+GlobalValue *ExtractTypeInfo(Value *V);
+
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
+                               const TargetLowering &TLI);
+
+/// getFCmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR floating-point condition code.  This includes
+/// consideration of global floating-point math flags.
+///
+ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
+
+/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
+/// return the equivalent code if we're allowed to assume that NaNs won't occur.
+ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
+
+/// getICmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR integer condition code.
+///
+ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);
+
+/// Test whether, given that the input instruction is in the tail call
+/// position, an attribute mismatch between the caller and the callee will
+/// inhibit tail call optimizations.
+/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
+/// is permitted, determines whether it's permitted only if the size of the
+/// caller's and callee's return types match exactly.
+bool attributesPermitTailCall(const Function *F, const Instruction *I,
+                              const ReturnInst *Ret,
+                              const TargetLoweringBase &TLI,
+                              bool *AllowDifferingSizes = nullptr);
+
+/// Test whether, given that the input instruction is in the tail call
+/// position, the return type or any attributes of the function will inhibit
+/// tail call optimization.
+bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
+                                     const ReturnInst *Ret,
+                                     const TargetLoweringBase &TLI);
+
+DenseMap<const MachineBasicBlock *, int>
+getFuncletMembership(const MachineFunction &MF);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
new file mode 100644
index 0000000..3d3bd3a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AsmPrinter.h
@@ -0,0 +1,645 @@
+//===- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the base class for target specific
+// asm writers.  This class primarily handles common functionality used by
+// all asm writers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTER_H
+#define LLVM_CODEGEN_ASMPRINTER_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinterHandler;
+class BasicBlock;
+class BlockAddress;
+class Constant;
+class ConstantArray;
+class DataLayout;
+class DIE;
+class DIEAbbrev;
+class DwarfDebug;
+class GCMetadataPrinter;
+class GCStrategy;
+class GlobalIndirectSymbol;
+class GlobalObject;
+class GlobalValue;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MachineFunction;
+class MachineInstr;
+class MachineJumpTableInfo;
+class MachineLoopInfo;
+class MachineModuleInfo;
+class MachineOptimizationRemarkEmitter;
+class MCAsmInfo;
+class MCCFIInstruction;
+struct MCCodePaddingContext;
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCSection;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbol;
+class MCTargetOptions;
+class MDNode;
+class Module;
+class raw_ostream;
+class TargetLoweringObjectFile;
+class TargetMachine;
+
+/// This class is intended to be used as a driving class for all asm writers.
+class AsmPrinter : public MachineFunctionPass {
+public:
+  /// Target machine description.
+  TargetMachine &TM;
+
+  /// Target Asm Printer information.
+  const MCAsmInfo *MAI;
+
+  /// This is the context for the output file that we are streaming. This owns
+  /// all of the global MC-related objects for the generated translation unit.
+  MCContext &OutContext;
+
+  /// This is the MCStreamer object for the file we are generating. This
+  /// contains the transient state for the current translation unit that we are
+  /// generating (such as the current section etc).
+  std::unique_ptr<MCStreamer> OutStreamer;
+
+  /// The current machine function.
+  const MachineFunction *MF = nullptr;
+
+  /// This is a pointer to the current MachineModuleInfo.
+  MachineModuleInfo *MMI = nullptr;
+
+  /// Optimization remark emitter.
+  MachineOptimizationRemarkEmitter *ORE;
+
+  /// The symbol for the current function. This is recalculated at the beginning
+  /// of each call to runOnMachineFunction().
+  MCSymbol *CurrentFnSym = nullptr;
+
+  /// The symbol used to represent the start of the current function for the
+  /// purpose of calculating its size (e.g. using the .size directive). By
+  /// default, this is equal to CurrentFnSym.
+  MCSymbol *CurrentFnSymForSize = nullptr;
+
+  /// Map global GOT-equivalent MCSymbols to GlobalVariables and keep track of
+  /// their number of uses by other globals.
+  using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
+  MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
+
+  /// Enable print [latency:throughput] in output.
+  bool EnablePrintSchedInfo = false;
+
+private:
+  MCSymbol *CurrentFnBegin = nullptr;
+  MCSymbol *CurrentFnEnd = nullptr;
+  MCSymbol *CurExceptionSym = nullptr;
+
+  // The garbage collection metadata printer table.
+  void *GCMetadataPrinters = nullptr; // Really a DenseMap.
+
+  /// Emit comments in assembly output if this is true.
+  bool VerboseAsm;
+
+  static char ID;
+
+  /// If VerboseAsm is set, a pointer to the loop info for this function.
+  MachineLoopInfo *LI = nullptr;
+
+  struct HandlerInfo {
+    AsmPrinterHandler *Handler;
+    const char *TimerName;
+    const char *TimerDescription;
+    const char *TimerGroupName;
+    const char *TimerGroupDescription;
+
+    HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
+                const char *TimerDescription, const char *TimerGroupName,
+                const char *TimerGroupDescription)
+        : Handler(Handler), TimerName(TimerName),
+          TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
+          TimerGroupDescription(TimerGroupDescription) {}
+  };
+
+  /// A vector of all debug/EH info emitters we should use. This vector
+  /// maintains ownership of the emitters.
+  SmallVector<HandlerInfo, 1> Handlers;
+
+public:
+  struct SrcMgrDiagInfo {
+    SourceMgr SrcMgr;
+    std::vector<const MDNode *> LocInfos;
+    LLVMContext::InlineAsmDiagHandlerTy DiagHandler;
+    void *DiagContext;
+  };
+
+private:
+  /// Structure for generating diagnostics for inline assembly. Only initialised
+  /// when necessary.
+  mutable std::unique_ptr<SrcMgrDiagInfo> DiagInfo;
+
+  /// If the target supports dwarf debug info, this pointer is non-null.
+  DwarfDebug *DD = nullptr;
+
+  /// True if the current module uses dwarf CFI annotations strictly for
+  /// debugging.
+  bool isCFIMoveForDebugging = false;
+
+protected:
+  explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);
+
+public:
+  ~AsmPrinter() override;
+
+  DwarfDebug *getDwarfDebug() { return DD; }
+  DwarfDebug *getDwarfDebug() const { return DD; }
+
+  uint16_t getDwarfVersion() const;
+  void setDwarfVersion(uint16_t Version);
+
+  bool isPositionIndependent() const;
+
+  /// Return true if assembly output should contain comments.
+  bool isVerbose() const { return VerboseAsm; }
+
+  /// Return a unique ID for the current function.
+  unsigned getFunctionNumber() const;
+
+  MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
+  MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
+  MCSymbol *getCurExceptionSym();
+
+  /// Return information about object file lowering.
+  const TargetLoweringObjectFile &getObjFileLowering() const;
+
+  /// Return information about data layout.
+  const DataLayout &getDataLayout() const;
+
+  /// Return the pointer size from the TargetMachine.
+  unsigned getPointerSize() const;
+
+  /// Return information about subtarget.
+  const MCSubtargetInfo &getSubtargetInfo() const;
+
+  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
+
+  /// Return the current section we are emitting to.
+  const MCSection *getCurrentSection() const;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &Name,
+                         const GlobalValue *GV) const;
+
+  MCSymbol *getSymbol(const GlobalValue *GV) const;
+
+  //===------------------------------------------------------------------===//
+  // XRay instrumentation implementation.
+  //===------------------------------------------------------------------===//
+public:
+  // This describes the kind of sled we're storing in the XRay table.
+  enum class SledKind : uint8_t {
+    FUNCTION_ENTER = 0,
+    FUNCTION_EXIT = 1,
+    TAIL_CALL = 2,
+    LOG_ARGS_ENTER = 3,
+    CUSTOM_EVENT = 4,
+  };
+
+  // The table will contain these structs that point to the sled, the function
+  // containing the sled, and what kind of sled (and whether they should always
+  // be instrumented). We also use a version identifier that the runtime can use
+  // to decide what to do with the sled, depending on the version of the sled.
+  struct XRayFunctionEntry {
+    const MCSymbol *Sled;
+    const MCSymbol *Function;
+    SledKind Kind;
+    bool AlwaysInstrument;
+    const class Function *Fn;
+    uint8_t Version;
+
+    void emit(int, MCStreamer *, const MCSymbol *) const;
+  };
+
+  // All the sleds to be emitted.
+  SmallVector<XRayFunctionEntry, 4> Sleds;
+
+  // A unique ID used for ELF sections associated with a particular function.
+  unsigned XRayFnUniqueID = 0;
+
+  // Helper function to record a given XRay sled.
+  void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind,
+                  uint8_t Version = 0);
+
+  /// Emit a table with all XRay instrumentation points.
+  void emitXRayTable();
+
+  //===------------------------------------------------------------------===//
+  // MachineFunctionPass Implementation.
+  //===------------------------------------------------------------------===//
+
+  /// Record analysis usage.
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// Set up the AsmPrinter when we are working on a new module. If your pass
+  /// overrides this, it must make sure to explicitly call this implementation.
+  bool doInitialization(Module &M) override;
+
+  /// Shut down the asmprinter. If you override this in your pass, you must make
+  /// sure to call it explicitly.
+  bool doFinalization(Module &M) override;
+
+  /// Emit the specified function out to the OutStreamer.
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    SetupMachineFunction(MF);
+    EmitFunctionBody();
+    return false;
+  }
+
+  //===------------------------------------------------------------------===//
+  // Coarse grained IR lowering routines.
+  //===------------------------------------------------------------------===//
+
+  /// This should be called when a new MachineFunction is being processed from
+  /// runOnMachineFunction.
+  void SetupMachineFunction(MachineFunction &MF);
+
+  /// This method emits the body and trailer for a function.
+  void EmitFunctionBody();
+
+  void emitCFIInstruction(const MachineInstr &MI);
+
+  void emitFrameAlloc(const MachineInstr &MI);
+
+  void emitStackSizeSection(const MachineFunction &MF);
+
+  enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
+  CFIMoveType needsCFIMoves() const;
+
+  /// Returns false if needsCFIMoves() == CFI_M_EH for any function
+  /// in the module.
+  bool needsOnlyDebugCFIMoves() const { return isCFIMoveForDebugging; }
+
+  bool needsSEHMoves();
+
+  /// Print to the current output stream assembly representations of the
+  /// constants in the constant pool MCP. This is used to print out constants
+  /// which have been "spilled to memory" by the code generator.
+  virtual void EmitConstantPool();
+
+  /// Print assembly representations of the jump tables used by the current
+  /// function to the current output stream.
+  virtual void EmitJumpTableInfo();
+
+  /// Emit the specified global variable to the .s file.
+  virtual void EmitGlobalVariable(const GlobalVariable *GV);
+
+  /// Check to see if the specified global is a special global used by LLVM. If
+  /// so, emit it and return true, otherwise do nothing and return false.
+  bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+
+  /// Emit an alignment directive to the specified power of two boundary. For
+  /// example, if you pass in 3 here, you will get an 8 byte alignment. If a
+  /// global value is specified, and if that global has an explicit alignment
+  /// requested, it will override the alignment request if required for
+  /// correctness.
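+  ///
+  /// For example (illustrative): EmitAlignment(4) aligns to a 16-byte
+  /// boundary, which typically shows up as a .p2align 4 directive in the
+  /// emitted assembly.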
+  void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+
+  /// Lower the specified LLVM Constant to an MCExpr.
+  virtual const MCExpr *lowerConstant(const Constant *CV);
+
+  /// \brief Print a general LLVM constant to the .s file.
+  void EmitGlobalConstant(const DataLayout &DL, const Constant *CV);
+
+  /// \brief Unnamed constant global variables solely containing a pointer to
+  /// another global variable act like global variable "proxies", or GOT
+  /// equivalents, i.e., they are only used to hold the address of the latter.
+  /// One optimization is to replace accesses to these proxies by using the GOT
+  /// entry for the final global instead. Hence, we select GOT equivalent
+  /// candidates among all the module global variables, avoid emitting them
+  /// unnecessarily and finally replace references to them by pc relative
+  /// accesses to GOT entries.
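+  ///
+  /// For example (illustrative IR), such a proxy looks like:
+  /// \code
+  ///   @gotequiv = private unnamed_addr constant i32* @foo
+  /// \endcode
+  /// and a pc-relative reference to @gotequiv can be replaced by a reference
+  /// to @foo's GOT entry.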
+  void computeGlobalGOTEquivs(Module &M);
+
+  /// \brief Constant expressions using GOT equivalent globals may not be
+  /// eligible for PC relative GOT entry conversion, in such cases we need to
+  /// emit the proxies we previously omitted in EmitGlobalVariable.
+  void emitGlobalGOTEquivs();
+
+  //===------------------------------------------------------------------===//
+  // Overridable Hooks
+  //===------------------------------------------------------------------===//
+
+  // Targets can, or in the case of EmitInstruction, must implement these to
+  // customize output.
+
+  /// This virtual method can be overridden by targets that want to emit
+  /// something at the start of their file.
+  virtual void EmitStartOfAsmFile(Module &) {}
+
+  /// This virtual method can be overridden by targets that want to emit
+  /// something at the end of their file.
+  virtual void EmitEndOfAsmFile(Module &) {}
+
+  /// Targets can override this to emit stuff before the first basic block in
+  /// the function.
+  virtual void EmitFunctionBodyStart() {}
+
+  /// Targets can override this to emit stuff after the last basic block in the
+  /// function.
+  virtual void EmitFunctionBodyEnd() {}
+
+  /// Targets can override this to emit stuff at the start of a basic block.
+  /// By default, this method prints the label for the specified
+  /// MachineBasicBlock, an alignment (if present) and a comment describing it
+  /// if appropriate.
+  virtual void EmitBasicBlockStart(const MachineBasicBlock &MBB) const;
+
+  /// Targets can override this to emit stuff at the end of a basic block.
+  virtual void EmitBasicBlockEnd(const MachineBasicBlock &MBB);
+
+  /// Targets should implement this to emit instructions.
+  virtual void EmitInstruction(const MachineInstr *) {
+    llvm_unreachable("EmitInstruction not implemented");
+  }
+
+  /// Return the symbol for the specified constant pool entry.
+  virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
+
+  virtual void EmitFunctionEntryLabel();
+
+  virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+  /// Targets can override this to change how global constants that are part of
+  /// a C++ static/global constructor list are emitted.
+  virtual void EmitXXStructor(const DataLayout &DL, const Constant *CV) {
+    EmitGlobalConstant(DL, CV);
+  }
+
+  /// Return true if the basic block has exactly one predecessor and the control
+  /// transfer mechanism between the predecessor and this block is a
+  /// fall-through.
+  virtual bool
+  isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+  /// Targets can override this to customize the output of IMPLICIT_DEF
+  /// instructions in verbose mode.
+  virtual void emitImplicitDef(const MachineInstr *MI) const;
+
+  //===------------------------------------------------------------------===//
+  // Symbol Lowering Routines.
+  //===------------------------------------------------------------------===//
+
+  MCSymbol *createTempSymbol(const Twine &Name) const;
+
+  /// Return the MCSymbol for a private symbol with global value name as its
+  /// base, with the specified suffix.
+  MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+                                         StringRef Suffix) const;
+
+  /// Return the MCSymbol for the specified ExternalSymbol.
+  MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
+
+  /// Return the symbol for the specified jump table entry.
+  MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
+
+  /// Return the symbol for the specified jump table .set
+  /// FIXME: privatize to AsmPrinter.
+  MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
+
+  /// Return the MCSymbol used to satisfy BlockAddress uses of the specified
+  /// basic block.
+  MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
+  MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
+
+  //===------------------------------------------------------------------===//
+  // Emission Helper Routines.
+  //===------------------------------------------------------------------===//
+
+  /// This is just a convenient handler for printing offsets.
+  void printOffset(int64_t Offset, raw_ostream &OS) const;
+
+  /// Emit a byte directive and value.
+  void emitInt8(int Value) const;
+
+  /// Emit a short directive and value.
+  void emitInt16(int Value) const;
+
+  /// Emit a long directive and value.
+  void emitInt32(int Value) const;
+
+  /// Emit something like ".long Hi-Lo" where the size in bytes of the directive
+  /// is specified by Size and Hi/Lo specify the labels.  This implicitly uses
+  /// .set if it is available.
+  void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
+                           unsigned Size) const;
+
+  /// Emit something like ".uleb128 Hi-Lo".
+  void EmitLabelDifferenceAsULEB128(const MCSymbol *Hi,
+                                    const MCSymbol *Lo) const;
+
+  /// Emit something like ".long Label+Offset" where the size in bytes of the
+  /// directive is specified by Size and Label specifies the label.  This
+  /// implicitly uses .set if it is available.
+  void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+                           unsigned Size, bool IsSectionRelative = false) const;
+
+  /// Emit something like ".long Label" where the size in bytes of the directive
+  /// is specified by Size and Label specifies the label.
+  void EmitLabelReference(const MCSymbol *Label, unsigned Size,
+                          bool IsSectionRelative = false) const {
+    EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
+  }
+
+  /// Emit something like ".long Label + Offset".
+  void EmitDwarfOffset(const MCSymbol *Label, uint64_t Offset) const;
+
+  //===------------------------------------------------------------------===//
+  // Dwarf Emission Helper Routines
+  //===------------------------------------------------------------------===//
+
+  /// Emit the specified signed leb128 value.
+  void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
+
+  /// Emit the specified unsigned leb128 value.
+  void EmitULEB128(uint64_t Value, const char *Desc = nullptr) const;
+
+  /// Emit a .byte 42 directive that corresponds to an encoding.  If verbose
+  /// assembly output is enabled, we output comments describing the encoding.
+  /// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
+  void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
+
+  /// Return the size of the encoding in bytes.
+  unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
+
+  /// Emit reference to a ttype global with a specified encoding.
+  void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
+
+  /// Emit a reference to a symbol for use in dwarf. Different object formats
+  /// represent this in different ways. Some use a relocation others encode
+  /// the label offset in its section.
+  void emitDwarfSymbolReference(const MCSymbol *Label,
+                                bool ForceOffset = false) const;
+
+  /// Emit the 4-byte offset of a string from the start of its section.
+  ///
+  /// When possible, emit a DwarfStringPool section offset without any
+  /// relocations, and without using the symbol.  Otherwise, defers to \a
+  /// emitDwarfSymbolReference().
+  void emitDwarfStringOffset(DwarfStringPoolEntry S) const;
+
+  /// Emit the 4-byte offset of a string from the start of its section.
+  void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
+    emitDwarfStringOffset(S.getEntry());
+  }
+
+  /// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
+  virtual unsigned getISAEncoding() { return 0; }
+
+  /// Emit the directive and value for debug thread local expression
+  ///
+  /// \p Value - The value to emit.
+  /// \p Size - The size of the integer (in bytes) to emit.
+  virtual void EmitDebugThreadLocal(const MCExpr *Value, unsigned Size) const;
+
+  //===------------------------------------------------------------------===//
+  // Dwarf Lowering Routines
+  //===------------------------------------------------------------------===//
+
+  /// \brief Emit frame instruction to describe the layout of the frame.
+  void emitCFIInstruction(const MCCFIInstruction &Inst) const;
+
+  /// \brief Emit Dwarf abbreviation table.
+  template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
+    // For each abbreviation.
+    for (const auto &Abbrev : Abbrevs)
+      emitDwarfAbbrev(*Abbrev);
+
+    // Mark end of abbreviations.
+    EmitULEB128(0, "EOM(3)");
+  }
+
+  void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;
+
+  /// \brief Recursively emit Dwarf DIE tree.
+  void emitDwarfDIE(const DIE &Die) const;
+
+  //===------------------------------------------------------------------===//
+  // Inline Asm Support
+  //===------------------------------------------------------------------===//
+
+  // These are hooks that targets can override to implement inline asm
+  // support.  These should probably be moved out of AsmPrinter someday.
+
+  /// Print information related to the specified machine instr that is
+  /// independent of the operand, and may be independent of the instr itself.
+  /// This can be useful for portably encoding the comment character or other
+  /// bits of target-specific knowledge into the asmstrings.  The syntax used is
+  /// ${:comment}.  Targets can override this to add support for their own
+  /// strange codes.
+  virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
+                            const char *Code) const;
+
+  /// Print the specified operand of MI, an INLINEASM instruction, using the
+  /// specified assembler variant.  Targets should override this to format as
+  /// appropriate.  This method can return true if the operand is erroneous.
+  virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                               unsigned AsmVariant, const char *ExtraCode,
+                               raw_ostream &OS);
+
+  /// Print the specified operand of MI, an INLINEASM instruction, using the
+  /// specified assembler variant as an address. Targets should override this to
+  /// format as appropriate.  This method can return true if the operand is
+  /// erroneous.
+  virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+                                     unsigned AsmVariant, const char *ExtraCode,
+                                     raw_ostream &OS);
+
+  /// Let the target do anything it needs to do before emitting inlineasm.
+  /// \p StartInfo - the subtarget info before parsing inline asm
+  virtual void emitInlineAsmStart() const;
+
+  /// Let the target do anything it needs to do after emitting inlineasm.
+  /// This callback can be used to restore the original mode in case the
+  /// inlineasm contains directives to switch modes.
+  /// \p StartInfo - the original subtarget info before inline asm
+  /// \p EndInfo   - the final subtarget info after parsing the inline asm,
+  ///                or NULL if the value is unknown.
+  virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+                                const MCSubtargetInfo *EndInfo) const;
+
+private:
+  /// Private state for PrintSpecial()
+  // Assign a unique ID to this machine instruction.
+  mutable const MachineInstr *LastMI = nullptr;
+  mutable unsigned LastFn = 0;
+  mutable unsigned Counter = ~0U;
+
+  /// This method emits the header for the current function.
+  virtual void EmitFunctionHeader();
+
+  /// Emit a blob of inline asm to the output streamer.
+  void
+  EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
+                const MCTargetOptions &MCOptions,
+                const MDNode *LocMDNode = nullptr,
+                InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
+
+  /// This method formats and emits the specified machine instruction that is an
+  /// inline asm.
+  void EmitInlineAsm(const MachineInstr *MI) const;
+
+  //===------------------------------------------------------------------===//
+  // Internal Implementation Details
+  //===------------------------------------------------------------------===//
+
+  /// This emits visibility information about a symbol, if this is supported
+  /// by the target.
+  void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
+                      bool IsDefinition = true) const;
+
+  void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
+
+  void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
+                          const MachineBasicBlock *MBB, unsigned uid) const;
+  void EmitLLVMUsedList(const ConstantArray *InitList);
+  /// Emit llvm.ident metadata in an '.ident' directive.
+  void EmitModuleIdents(Module &M);
+  void EmitXXStructorList(const DataLayout &DL, const Constant *List,
+                          bool isCtor);
+
+  GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
+  /// Emit GlobalAlias or GlobalIFunc.
+  void emitGlobalIndirectSymbol(Module &M,
+                                const GlobalIndirectSymbol& GIS);
+  void setupCodePaddingContext(const MachineBasicBlock &MBB,
+                               MCCodePaddingContext &Context) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ASMPRINTER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
new file mode 100644
index 0000000..1f9c96b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -0,0 +1,65 @@
+//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/AtomicOrdering.h"
+
+namespace llvm {
+
+class AtomicRMWInst;
+class Value;
+
+/// Parameters (see the expansion example below):
+/// (the builder, %addr, %loaded, %new_val, ordering,
+///  /* OUT */ %success, /* OUT */ %new_loaded)
+using CreateCmpXchgInstFun =
+    function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
+                      Value *&, Value *&)>;
+
+/// \brief Expand an atomic RMW instruction into a loop utilizing
+/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
+/// instructions in the first place and that there isn't another, better,
+/// transformation available (for example AArch32/AArch64 have linked loads).
+///
+/// This is useful in passes which can't rewrite the more exotic RMW
+/// instructions directly into platform-specific intrinsics (because, say,
+/// those intrinsics don't exist). If such a pass is able to expand cmpxchg
+/// instructions directly however, then, with this function, it could avoid two
+/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
+/// specific example would be PNaCl's `RewriteAtomics` pass.
+///
+/// Given: atomicrmw some_op iN* %addr, iN %incr ordering
+///
+/// The standard expansion we produce is:
+///     [...]
+///     %init_loaded = load atomic iN* %addr
+///     br label %loop
+/// loop:
+///     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
+///     %new = some_op iN %loaded, %incr
+/// ; This is what -atomic-expand will produce using this function on i686
+/// targets:
+///     %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
+///     %new_loaded = extractvalue { iN, i1 } %pair, 0
+///     %success = extractvalue { iN, i1 } %pair, 1
+/// ; End callback produced IR
+///     br i1 %success, label %atomicrmw.end, label %loop
+/// atomicrmw.end:
+///     [...]
+///
+/// Returns true if the containing function was modified.
+bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
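+
+// A minimal sketch of a CreateCmpXchgInstFun callback built on a plain IR
+// cmpxchg (illustrative; a pass with target-specific cmpxchg intrinsics
+// would emit those instead):
+//
+//   auto Factory = [](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
+//                     Value *NewVal, AtomicOrdering Ord, Value *&Success,
+//                     Value *&NewLoaded) {
+//     Value *Pair = Builder.CreateAtomicCmpXchg(
+//         Addr, Loaded, NewVal, Ord,
+//         AtomicCmpXchgInst::getStrongestFailureOrdering(Ord));
+//     NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
+//     Success = Builder.CreateExtractValue(Pair, 1, "success");
+//   };
+//   expandAtomicRMWToCmpXchg(RMWI, Factory);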
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
new file mode 100644
index 0000000..9096263
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/BasicTTIImpl.h
@@ -0,0 +1,1383 @@
+//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides a helper that implements much of the TTI interface in
+/// terms of the target-independent code generator and TargetLowering
+/// interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
+#define LLVM_CODEGEN_BASICTTIIMPL_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/TargetTransformInfoImpl.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+class Function;
+class GlobalValue;
+class LLVMContext;
+class ScalarEvolution;
+class SCEV;
+class TargetMachine;
+
+extern cl::opt<unsigned> PartialUnrollingThreshold;
+
+/// \brief Base class which can be used to help build a TTI implementation.
+///
+/// This class provides as much implementation of the TTI interface as is
+/// possible using the target independent parts of the code generator.
+///
+/// In order to subclass it, your class must implement a getST() method to
+/// return the subtarget, and a getTLI() method to return the target lowering.
+/// We need these methods implemented in the derived class so that this class
+/// doesn't have to duplicate storage for them.
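+///
+/// A minimal sketch of such a subclass (names are illustrative; constructors
+/// and cost-hook overrides are omitted):
+///
+/// \code
+///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
+///     const MySubtarget *ST;
+///     const MyTargetLowering *TLI;
+///
+///   public:
+///     const MySubtarget *getST() const { return ST; }
+///     const MyTargetLowering *getTLI() const { return TLI; }
+///   };
+/// \endcode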
+template <typename T>
+class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
+private:
+  using BaseT = TargetTransformInfoImplCRTPBase<T>;
+  using TTI = TargetTransformInfo;
+
+  /// Estimate a cost of shuffle as a sequence of extract and insert
+  /// operations.
+  unsigned getPermuteShuffleOverhead(Type *Ty) {
+    assert(Ty->isVectorTy() && "Can only shuffle vectors");
+    unsigned Cost = 0;
+    // Shuffle cost is equal to the cost of extracting elements from its
+    // arguments plus the cost of inserting them into the result vector.
+
+    // e.g. a <4 x float> shuffle with mask <0,5,2,7> extracts index 0 of the
+    // first vector, index 1 of the second vector, index 2 of the first vector
+    // and index 3 of the second vector, and inserts them at indices <0,1,2,3>
+    // of the result vector.
+    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+      Cost += static_cast<T *>(this)
+                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      Cost += static_cast<T *>(this)
+                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+    }
+    return Cost;
+  }
+
+  /// \brief Local query method delegates up to T which *must* implement this!
+  const TargetSubtargetInfo *getST() const {
+    return static_cast<const T *>(this)->getST();
+  }
+
+  /// \brief Local query method delegates up to T which *must* implement this!
+  const TargetLoweringBase *getTLI() const {
+    return static_cast<const T *>(this)->getTLI();
+  }
+
+  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
+    switch (M) {
+      case TTI::MIM_Unindexed:
+        return ISD::UNINDEXED;
+      case TTI::MIM_PreInc:
+        return ISD::PRE_INC;
+      case TTI::MIM_PreDec:
+        return ISD::PRE_DEC;
+      case TTI::MIM_PostInc:
+        return ISD::POST_INC;
+      case TTI::MIM_PostDec:
+        return ISD::POST_DEC;
+    }
+    llvm_unreachable("Unexpected MemIndexedMode");
+  }
+
+protected:
+  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
+      : BaseT(DL) {}
+
+  using TargetTransformInfoImplBase::DL;
+
+public:
+  /// \name Scalar TTI Implementations
+  /// @{
+  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                      unsigned BitWidth, unsigned AddressSpace,
+                                      unsigned Alignment, bool *Fast) const {
+    EVT E = EVT::getIntegerVT(Context, BitWidth);
+    return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment, Fast);
+  }
+
+  bool hasBranchDivergence() { return false; }
+
+  bool isSourceOfDivergence(const Value *V) { return false; }
+
+  bool isAlwaysUniform(const Value *V) { return false; }
+
+  unsigned getFlatAddressSpace() {
+    // Return an invalid address space.
+    return -1;
+  }
+
+  bool isLegalAddImmediate(int64_t imm) {
+    return getTLI()->isLegalAddImmediate(imm);
+  }
+
+  bool isLegalICmpImmediate(int64_t imm) {
+    return getTLI()->isLegalICmpImmediate(imm);
+  }
+
+  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                             bool HasBaseReg, int64_t Scale,
+                             unsigned AddrSpace, Instruction *I = nullptr) {
+    TargetLoweringBase::AddrMode AM;
+    AM.BaseGV = BaseGV;
+    AM.BaseOffs = BaseOffset;
+    AM.HasBaseReg = HasBaseReg;
+    AM.Scale = Scale;
+    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
+  }
+
+  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
+                          const DataLayout &DL) const {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
+  }
+
+  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
+                           const DataLayout &DL) const {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
+  }
+
+  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
+    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
+  }
+
+  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+    TargetLoweringBase::AddrMode AM;
+    AM.BaseGV = BaseGV;
+    AM.BaseOffs = BaseOffset;
+    AM.HasBaseReg = HasBaseReg;
+    AM.Scale = Scale;
+    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
+  }
+
+  bool isTruncateFree(Type *Ty1, Type *Ty2) {
+    return getTLI()->isTruncateFree(Ty1, Ty2);
+  }
+
+  bool isProfitableToHoist(Instruction *I) {
+    return getTLI()->isProfitableToHoist(I);
+  }
+
+  bool useAA() const { return getST()->useAA(); }
+
+  bool isTypeLegal(Type *Ty) {
+    EVT VT = getTLI()->getValueType(DL, Ty);
+    return getTLI()->isTypeLegal(VT);
+  }
+
+  int getGEPCost(Type *PointeeType, const Value *Ptr,
+                 ArrayRef<const Value *> Operands) {
+    return BaseT::getGEPCost(PointeeType, Ptr, Operands);
+  }
+
+  int getExtCost(const Instruction *I, const Value *Src) {
+    if (getTLI()->isExtFree(I))
+      return TargetTransformInfo::TCC_Free;
+
+    if (isa<ZExtInst>(I) || isa<SExtInst>(I))
+      if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
+        if (getTLI()->isExtLoad(LI, I, DL))
+          return TargetTransformInfo::TCC_Free;
+
+    return TargetTransformInfo::TCC_Basic;
+  }
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<const Value *> Arguments) {
+    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
+  }
+
+  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Type *> ParamTys) {
+    if (IID == Intrinsic::cttz) {
+      if (getTLI()->isCheapToSpeculateCttz())
+        return TargetTransformInfo::TCC_Basic;
+      return TargetTransformInfo::TCC_Expensive;
+    }
+
+    if (IID == Intrinsic::ctlz) {
+      if (getTLI()->isCheapToSpeculateCtlz())
+        return TargetTransformInfo::TCC_Basic;
+      return TargetTransformInfo::TCC_Expensive;
+    }
+
+    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
+  }
+
+  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+                                            unsigned &JumpTableSize) {
+    /// Try to find the estimated number of clusters. Note that the number of
+    /// clusters identified in this function could be different from the actual
+    /// numbers found in lowering. This function ignores switches that are
+    /// lowered with a mix of jump table / bit test / BTree. It was initially
+    /// intended for estimating the cost of a switch in the inline cost
+    /// heuristic, but it is a generic cost model usable in other places as
+    /// well (e.g., in loop unrolling).
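+    ///
+    /// A worked example (illustrative; thresholds are target-dependent): a
+    /// switch over i32 with eight dense cases 0..7 gives Range == 8, so a
+    /// target that allows jump tables reports one cluster with
+    /// JumpTableSize == 8, while a target that allows neither jump tables
+    /// nor bit tests reports all N == 8 cases as clusters.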
+    unsigned N = SI.getNumCases();
+    const TargetLoweringBase *TLI = getTLI();
+    const DataLayout &DL = this->getDataLayout();
+
+    JumpTableSize = 0;
+    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
+
+    // Early exit if neither a jump table nor a bit test can be used.
+    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
+      return N;
+
+    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
+    APInt MinCaseVal = MaxCaseVal;
+    for (auto CI : SI.cases()) {
+      const APInt &CaseVal = CI.getCaseValue()->getValue();
+      if (CaseVal.sgt(MaxCaseVal))
+        MaxCaseVal = CaseVal;
+      if (CaseVal.slt(MinCaseVal))
+        MinCaseVal = CaseVal;
+    }
+
+    // Check if suitable for a bit test
+    if (N <= DL.getIndexSizeInBits(0u)) {
+      SmallPtrSet<const BasicBlock *, 4> Dests;
+      for (auto I : SI.cases())
+        Dests.insert(I.getCaseSuccessor());
+
+      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
+                                     DL))
+        return 1;
+    }
+
+    // Check if suitable for a jump table.
+    if (IsJTAllowed) {
+      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
+        return N;
+      uint64_t Range =
+          (MaxCaseVal - MinCaseVal)
+              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
+      // Check whether a range of clusters is dense enough for a jump table
+      if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
+        JumpTableSize = Range;
+        return 1;
+      }
+    }
+    return N;
+  }
+
+  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
+
+  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
+
+  bool shouldBuildLookupTables() {
+    const TargetLoweringBase *TLI = getTLI();
+    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+  }
+
+  bool haveFastSqrt(Type *Ty) {
+    const TargetLoweringBase *TLI = getTLI();
+    EVT VT = TLI->getValueType(DL, Ty);
+    return TLI->isTypeLegal(VT) &&
+           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
+  }
+
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
+    return true;
+  }
+
+  unsigned getFPOpCost(Type *Ty) {
+    // Check whether FADD is available, as a proxy for floating-point in
+    // general.
+    const TargetLoweringBase *TLI = getTLI();
+    EVT VT = TLI->getValueType(DL, Ty);
+    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
+      return TargetTransformInfo::TCC_Basic;
+    return TargetTransformInfo::TCC_Expensive;
+  }
+
+  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
+    const TargetLoweringBase *TLI = getTLI();
+    switch (Opcode) {
+    default: break;
+    case Instruction::Trunc:
+      if (TLI->isTruncateFree(OpTy, Ty))
+        return TargetTransformInfo::TCC_Free;
+      return TargetTransformInfo::TCC_Basic;
+    case Instruction::ZExt:
+      if (TLI->isZExtFree(OpTy, Ty))
+        return TargetTransformInfo::TCC_Free;
+      return TargetTransformInfo::TCC_Basic;
+    }
+
+    return BaseT::getOperationCost(Opcode, Ty, OpTy);
+  }
+
+  unsigned getInliningThresholdMultiplier() { return 1; }
+
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               TTI::UnrollingPreferences &UP) {
+    // This unrolling functionality is target independent, but to provide some
+    // motivation for its intended use, consider x86:
+
+    // According to the Intel 64 and IA-32 Architectures Optimization Reference
+    // Manual, Intel Core models and later have a loop stream detector (and
+    // associated uop queue) that can benefit from partial unrolling.
+    // The relevant requirements are:
+    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
+    //    taken, and none of them may be calls.
+    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+    // According to the Software Optimization Guide for AMD Family 15h
+    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
+    // and loop buffer which can benefit from partial unrolling.
+    // The relevant requirements are:
+    //  - The loop must have fewer than 16 branches
+    //  - The loop must have less than 40 uops in all executed loop branches
+
+    // The number of taken branches in a loop is hard to estimate here, and
+    // benchmarking has revealed that it is better not to be conservative when
+    // estimating the branch count. As a result, we'll ignore the branch limits
+    // until someone finds a case where it matters in practice.
+
+    unsigned MaxOps;
+    const TargetSubtargetInfo *ST = getST();
+    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+      MaxOps = PartialUnrollingThreshold;
+    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
+      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
+    else
+      return;
+
+    // Scan the loop: don't unroll loops with calls.
+    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
+         ++I) {
+      BasicBlock *BB = *I;
+
+      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+          ImmutableCallSite CS(&*J);
+          if (const Function *F = CS.getCalledFunction()) {
+            if (!static_cast<T *>(this)->isLoweredToCall(F))
+              continue;
+          }
+
+          return;
+        }
+    }
+
+    // Enable runtime and partial unrolling up to the specified size.
+    // Enable using trip count upper bound to unroll loops.
+    UP.Partial = UP.Runtime = UP.UpperBound = true;
+    UP.PartialThreshold = MaxOps;
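+    // For instance (hypothetical value), a subtarget whose scheduling model
+    // reports LoopMicroOpBufferSize == 28 gives every call-free loop
+    // UP.PartialThreshold == 28 with partial, runtime, and upper-bound
+    // unrolling enabled.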
+
+    // Avoid unrolling when optimizing for size.
+    UP.OptSizeThreshold = 0;
+    UP.PartialOptSizeThreshold = 0;
+
+    // Set number of instructions optimized when "back edge"
+    // becomes "fall through" to default value of 2.
+    UP.BEInsns = 2;
+  }
+
+  int getInstructionLatency(const Instruction *I) {
+    if (isa<LoadInst>(I))
+      return getST()->getSchedModel().DefaultLoadLatency;
+
+    return BaseT::getInstructionLatency(I);
+  }
+
+  /// @}
+
+  /// \name Vector TTI Implementations
+  /// @{
+
+  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
+
+  unsigned getRegisterBitWidth(bool Vector) const { return 32; }
+
+  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+  /// are set if the result needs to be inserted and/or extracted from vectors.
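+  /// For example, scalarizing a <4 x i32> value with both Insert and Extract
+  /// set is costed as four insertelement plus four extractelement queries;
+  /// with a per-element cost of 1 that amounts to an overhead of 8.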
+  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
+    assert(Ty->isVectorTy() && "Can only scalarize vectors");
+    unsigned Cost = 0;
+
+    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+      if (Insert)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+      if (Extract)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+    }
+
+    return Cost;
+  }
+
+  /// Estimate the overhead of scalarizing an instruction's unique
+  /// non-constant operands. The types of the arguments are ordinarily
+  /// scalar, in which case the costs are multiplied by VF.
+  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
+                                            unsigned VF) {
+    unsigned Cost = 0;
+    SmallPtrSet<const Value*, 4> UniqueOperands;
+    for (const Value *A : Args) {
+      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
+        Type *VecTy = nullptr;
+        if (A->getType()->isVectorTy()) {
+          VecTy = A->getType();
+          // If A is a vector operand, VF should be 1 or correspond to A.
+          assert((VF == 1 || VF == VecTy->getVectorNumElements()) &&
+                 "Vector argument does not match VF");
+        }
+        else
+          VecTy = VectorType::get(A->getType(), VF);
+
+        Cost += getScalarizationOverhead(VecTy, false, true);
+      }
+    }
+
+    return Cost;
+  }
+
+  unsigned getScalarizationOverhead(Type *VecTy, ArrayRef<const Value *> Args) {
+    assert(VecTy->isVectorTy());
+
+    unsigned Cost = 0;
+
+    Cost += getScalarizationOverhead(VecTy, true, false);
+    if (!Args.empty())
+      Cost += getOperandsScalarizationOverhead(Args,
+                                               VecTy->getVectorNumElements());
+    else
+      // When no information on arguments is provided, we add the cost
+      // associated with one argument as a heuristic.
+      Cost += getScalarizationOverhead(VecTy, false, true);
+
+    return Cost;
+  }
+
+  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+
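+  /// A sketch of the accounting below (the target is hypothetical): an add
+  /// on <8 x i32> where only v4i32 is legal splits into two legal ops, so
+  /// LT.first == 2 and the legal-path cost is 2 * OpCost == 2; an fadd on
+  /// <8 x float> under the same assumptions costs 2 * 2, because
+  /// floating-point operations are assumed to be twice as expensive.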
+  unsigned getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty,
+      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
+      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
+    // Check if any of the operands are vector operands.
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
+
+    bool IsFloat = Ty->isFPOrFPVectorTy();
+    // Assume that floating point arithmetic operations cost twice as much as
+    // integer operations.
+    unsigned OpCost = (IsFloat ? 2 : 1);
+
+    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+      // The operation is legal. Assume it costs 1.
+      // TODO: Once we have extract/insert subvector cost we need to use them.
+      return LT.first * OpCost;
+    }
+
+    if (!TLI->isOperationExpand(ISD, LT.second)) {
+      // If the operation is custom lowered, then assume that the code is twice
+      // as expensive.
+      return LT.first * 2 * OpCost;
+    }
+
+    // Else, assume that we need to scalarize this op.
+    // TODO: If one of the types get legalized by splitting, handle this
+    // similarly to what getCastInstrCost() does.
+    if (Ty->isVectorTy()) {
+      unsigned Num = Ty->getVectorNumElements();
+      unsigned Cost = static_cast<T *>(this)
+                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
+      // Return the cost of multiple scalar invocation plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(Ty, Args) + Num * Cost;
+    }
+
+    // We don't know anything about this scalar instruction.
+    return OpCost;
+  }
+
+  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+                          Type *SubTp) {
+    if (Kind == TTI::SK_Alternate || Kind == TTI::SK_PermuteTwoSrc ||
+        Kind == TTI::SK_PermuteSingleSrc) {
+      return getPermuteShuffleOverhead(Tp);
+    }
+    return 1;
+  }
+
+  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                            const Instruction *I = nullptr) {
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
+    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
+
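+    // For instance (assuming 128-bit vector registers), a bitcast from
+    // <4 x i32> to <2 x i64> legalizes both sides to a single 128-bit
+    // register and is caught by the NOOP check below at cost 0.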
+    // Check for NOOP conversions.
+    if (SrcLT.first == DstLT.first &&
+        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+      // Bitcasts between types that are legalized to the same type are free.
+      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
+        return 0;
+    }
+
+    if (Opcode == Instruction::Trunc &&
+        TLI->isTruncateFree(SrcLT.second, DstLT.second))
+      return 0;
+
+    if (Opcode == Instruction::ZExt &&
+        TLI->isZExtFree(SrcLT.second, DstLT.second))
+      return 0;
+
+    if (Opcode == Instruction::AddrSpaceCast &&
+        TLI->isNoopAddrSpaceCast(Src->getPointerAddressSpace(),
+                                 Dst->getPointerAddressSpace()))
+      return 0;
+
+    // If this is a zext/sext of a load, return 0 if the corresponding
+    // extending load exists on target.
+    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
+        I && isa<LoadInst>(I->getOperand(0))) {
+        EVT ExtVT = EVT::getEVT(Dst);
+        EVT LoadVT = EVT::getEVT(Src);
+        unsigned LType =
+          ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
+        if (TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
+          return 0;
+    }
+
+    // If the cast is marked as legal (or promote) then assume low cost.
+    if (SrcLT.first == DstLT.first &&
+        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
+      return 1;
+
+    // Handle scalar conversions.
+    if (!Src->isVectorTy() && !Dst->isVectorTy()) {
+      // Scalar bitcasts are usually free.
+      if (Opcode == Instruction::BitCast)
+        return 0;
+
+      // Just check the op cost. If the operation is legal then assume it costs
+      // 1.
+      if (!TLI->isOperationExpand(ISD, DstLT.second))
+        return 1;
+
+      // Assume that illegal scalar instructions are expensive.
+      return 4;
+    }
+
+    // Check vector-to-vector casts.
+    if (Dst->isVectorTy() && Src->isVectorTy()) {
+      // If the cast is between same-sized registers, then the check is simple.
+      if (SrcLT.first == DstLT.first &&
+          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+        // Assume that Zext is done using AND.
+        if (Opcode == Instruction::ZExt)
+          return 1;
+
+        // Assume that sext is done using SHL and SRA.
+        if (Opcode == Instruction::SExt)
+          return 2;
+
+        // Just check the op cost. If the operation is legal then assume it
+        // costs 1 and multiply by the type-legalization overhead.
+        if (!TLI->isOperationExpand(ISD, DstLT.second))
+          return SrcLT.first * 1;
+      }
+
+      // If we are legalizing by splitting, query the concrete TTI for the cost
+      // of casting the original vector twice. We also need to factor in the
+      // cost of the split itself. Count that as 1, to be consistent with
+      // TLI->getTypeLegalizationCost().
+      if ((TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
+           TargetLowering::TypeSplitVector) ||
+          (TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
+           TargetLowering::TypeSplitVector)) {
+        Type *SplitDst = VectorType::get(Dst->getVectorElementType(),
+                                         Dst->getVectorNumElements() / 2);
+        Type *SplitSrc = VectorType::get(Src->getVectorElementType(),
+                                         Src->getVectorNumElements() / 2);
+        T *TTI = static_cast<T *>(this);
+        return TTI->getVectorSplitCost() +
+               (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I));
+      }
+
+      // In other cases where the source or destination are illegal, assume
+      // the operation will get scalarized.
+      unsigned Num = Dst->getVectorNumElements();
+      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
+          Opcode, Dst->getScalarType(), Src->getScalarType(), I);
+
+      // Return the cost of multiple scalar invocation plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
+    }
+
+    // We already handled vector-to-vector and scalar-to-scalar conversions.
+    // This is where we handle bitcasts between vectors and scalars. We need
+    // to assume that the conversion is scalarized in one way or another.
+    if (Opcode == Instruction::BitCast)
+      // Illegal bitcasts are done by storing and loading from a stack slot.
+      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
+                                : 0) +
+             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
+                                : 0);
+
+    llvm_unreachable("Unhandled cast");
+  }
+
+  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+                                    VectorType *VecTy, unsigned Index) {
+    return static_cast<T *>(this)->getVectorInstrCost(
+               Instruction::ExtractElement, VecTy, Index) +
+           static_cast<T *>(this)->getCastInstrCost(Opcode, Dst,
+                                                    VecTy->getElementType());
+  }
+
+  unsigned getCFInstrCost(unsigned Opcode) {
+    // Branches are assumed to be predicted.
+    return 0;
+  }
+
+  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                              const Instruction *I) {
+    const TargetLoweringBase *TLI = getTLI();
+    int ISD = TLI->InstructionOpcodeToISD(Opcode);
+    assert(ISD && "Invalid opcode");
+
+    // Selects on vectors are actually vector selects.
+    if (ISD == ISD::SELECT) {
+      assert(CondTy && "CondTy must exist");
+      if (CondTy->isVectorTy())
+        ISD = ISD::VSELECT;
+    }
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
+
+    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
+        !TLI->isOperationExpand(ISD, LT.second)) {
+      // The operation is legal. Assume it costs 1. Multiply
+      // by the type-legalization overhead.
+      return LT.first * 1;
+    }
+
+    // Otherwise, assume that the cast is scalarized.
+    // TODO: If one of the types get legalized by splitting, handle this
+    // similarly to what getCastInstrCost() does.
+    if (ValTy->isVectorTy()) {
+      unsigned Num = ValTy->getVectorNumElements();
+      if (CondTy)
+        CondTy = CondTy->getScalarType();
+      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
+          Opcode, ValTy->getScalarType(), CondTy, I);
+
+      // Return the cost of multiple scalar invocation plus the cost of
+      // inserting and extracting the values.
+      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
+    }
+
+    // Unknown scalar opcode.
+    return 1;
+  }
+
+  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+    std::pair<unsigned, MVT> LT =
+        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
+
+    return LT.first;
+  }
+
+  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                       unsigned AddressSpace, const Instruction *I = nullptr) {
+    assert(!Src->isVoidTy() && "Invalid type");
+    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
+
+    // Assuming that all loads of legal types cost 1.
+    unsigned Cost = LT.first;
+
+    if (Src->isVectorTy() &&
+        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
+      // This is a vector load/store that legalizes to a larger type than the
+      // vector itself. Unless the corresponding extending load or truncating
+      // store is legal, this will scalarize.
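+      // For example (the promotion behavior is target-dependent), a load of
+      // <4 x i8> that is promoted to a wider legal vector type keeps the
+      // cost LT.first only if the target has a legal extending vector load;
+      // otherwise the overhead of inserting the four loaded elements is
+      // added below.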
+      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
+      EVT MemVT = getTLI()->getValueType(DL, Src);
+      if (Opcode == Instruction::Store)
+        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
+      else
+        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
+
+      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
+        // This is a vector load/store for some illegal type that is scalarized.
+        // We must account for the cost of building or decomposing the vector.
+        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
+                                         Opcode == Instruction::Store);
+      }
+    }
+
+    return Cost;
+  }
+
+  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                      unsigned Factor,
+                                      ArrayRef<unsigned> Indices,
+                                      unsigned Alignment,
+                                      unsigned AddressSpace) {
+    VectorType *VT = dyn_cast<VectorType>(VecTy);
+    assert(VT && "Expect a vector type for interleaved memory op");
+
+    unsigned NumElts = VT->getNumElements();
+    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
+
+    unsigned NumSubElts = NumElts / Factor;
+    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
+
+    // First, the cost of the load/store operation.
+    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
+        Opcode, VecTy, Alignment, AddressSpace);
+
+    // Legalize the vector type, and get the legalized and unlegalized type
+    // sizes.
+    MVT VecTyLT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
+    unsigned VecTySize =
+        static_cast<T *>(this)->getDataLayout().getTypeStoreSize(VecTy);
+    unsigned VecTyLTSize = VecTyLT.getStoreSize();
+
+    // Return the ceiling of dividing A by B.
+    auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
+
+    // Scale the cost of the memory operation by the fraction of legalized
+    // instructions that will actually be used. We shouldn't account for the
+    // cost of dead instructions since they will be removed.
+    //
+    // E.g., An interleaved load of factor 8:
+    //       %vec = load <16 x i64>, <16 x i64>* %ptr
+    //       %v0 = shufflevector %vec, undef, <0, 8>
+    //
+    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
+    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
+    // type). The other loads are unused.
+    //
+    // We only scale the cost of loads since interleaved store groups aren't
+    // allowed to have gaps.
+    if (Opcode == Instruction::Load && VecTySize > VecTyLTSize) {
+      // The number of loads of a legal type it will take to represent a load
+      // of the unlegalized vector type.
+      unsigned NumLegalInsts = ceil(VecTySize, VecTyLTSize);
+
+      // The number of elements of the unlegalized type that correspond to a
+      // single legal instruction.
+      unsigned NumEltsPerLegalInst = ceil(NumElts, NumLegalInsts);
+
+      // Determine which legal instructions will be used.
+      BitVector UsedInsts(NumLegalInsts, false);
+      for (unsigned Index : Indices)
+        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
+          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);
+
+      // Scale the cost of the load by the fraction of legal instructions that
+      // will be used.
+      // Multiply before dividing so the integer division does not truncate
+      // the fraction to zero.
+      Cost = Cost * UsedInsts.count() / NumLegalInsts;
+    }
+
+    // Then add the cost of the interleave operation.
+    if (Opcode == Instruction::Load) {
+      // The interleave cost is similar to extracting the sub vectors'
+      // elements from the wide vector and inserting them into the sub
+      // vectors.
+      //
+      // E.g. An interleaved load of factor 2 (with one member of index 0):
+      //      %vec = load <8 x i32>, <8 x i32>* %ptr
+      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>         ; Index 0
+      // The cost is estimated as extract elements at 0, 2, 4, 6 from the
+      // <8 x i32> vector and insert them into a <4 x i32> vector.
+
+      assert(Indices.size() <= Factor &&
+             "Interleaved memory op has too many members");
+
+      for (unsigned Index : Indices) {
+        assert(Index < Factor && "Invalid index for interleaved memory op");
+
+        // Extract elements from loaded vector for each sub vector.
+        for (unsigned i = 0; i < NumSubElts; i++)
+          Cost += static_cast<T *>(this)->getVectorInstrCost(
+              Instruction::ExtractElement, VT, Index + i * Factor);
+      }
+
+      unsigned InsSubCost = 0;
+      for (unsigned i = 0; i < NumSubElts; i++)
+        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
+            Instruction::InsertElement, SubVT, i);
+
+      Cost += Indices.size() * InsSubCost;
+    } else {
+      // The interleave cost is that of extracting all elements from the sub
+      // vectors and inserting them into the wide vector.
+      //
+      // E.g. An interleaved store of factor 2:
+      //      %interleaved.vec = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
+      //      store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
+      // The cost is estimated as extract all elements from both <4 x i32>
+      // vectors and insert into the <8 x i32> vector.
+
+      unsigned ExtSubCost = 0;
+      for (unsigned i = 0; i < NumSubElts; i++)
+        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
+            Instruction::ExtractElement, SubVT, i);
+      Cost += ExtSubCost * Factor;
+
+      for (unsigned i = 0; i < NumElts; i++)
+        Cost += static_cast<T *>(this)
+                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
+    }
+
+    return Cost;
+  }
+
+  /// Get intrinsic cost based on arguments.
+  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                                 ArrayRef<Value *> Args, FastMathFlags FMF,
+                                 unsigned VF = 1) {
+    unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
+    assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
+
+    switch (IID) {
+    default: {
+      // Assume that we need to scalarize this intrinsic.
+      SmallVector<Type *, 4> Types;
+      for (Value *Op : Args) {
+        Type *OpTy = Op->getType();
+        assert(VF == 1 || !OpTy->isVectorTy());
+        Types.push_back(VF == 1 ? OpTy : VectorType::get(OpTy, VF));
+      }
+
+      if (VF > 1 && !RetTy->isVoidTy())
+        RetTy = VectorType::get(RetTy, VF);
+
+      // Compute the scalarization overhead based on Args for a vector
+      // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
+      // the CostModel pass will pass a vector RetTy with VF == 1.
+      unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
+      if (RetVF > 1 || VF > 1) {
+        ScalarizationCost = 0;
+        if (!RetTy->isVoidTy())
+          ScalarizationCost += getScalarizationOverhead(RetTy, true, false);
+        ScalarizationCost += getOperandsScalarizationOverhead(Args, VF);
+      }
+
+      return static_cast<T *>(this)->
+        getIntrinsicInstrCost(IID, RetTy, Types, FMF, ScalarizationCost);
+    }
+    case Intrinsic::masked_scatter: {
+      assert(VF == 1 && "Can't vectorize types here.");
+      Value *Mask = Args[3];
+      bool VarMask = !isa<Constant>(Mask);
+      unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
+      return
+        static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,
+                                                       Args[0]->getType(),
+                                                       Args[1], VarMask,
+                                                       Alignment);
+    }
+    case Intrinsic::masked_gather: {
+      assert(VF == 1 && "Can't vectorize types here.");
+      Value *Mask = Args[2];
+      bool VarMask = !isa<Constant>(Mask);
+      unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
+      return
+        static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
+                                                       RetTy, Args[0], VarMask,
+                                                       Alignment);
+    }
+    case Intrinsic::experimental_vector_reduce_add:
+    case Intrinsic::experimental_vector_reduce_mul:
+    case Intrinsic::experimental_vector_reduce_and:
+    case Intrinsic::experimental_vector_reduce_or:
+    case Intrinsic::experimental_vector_reduce_xor:
+    case Intrinsic::experimental_vector_reduce_fadd:
+    case Intrinsic::experimental_vector_reduce_fmul:
+    case Intrinsic::experimental_vector_reduce_smax:
+    case Intrinsic::experimental_vector_reduce_smin:
+    case Intrinsic::experimental_vector_reduce_fmax:
+    case Intrinsic::experimental_vector_reduce_fmin:
+    case Intrinsic::experimental_vector_reduce_umax:
+    case Intrinsic::experimental_vector_reduce_umin:
+      return getIntrinsicInstrCost(IID, RetTy, Args[0]->getType(), FMF);
+    }
+  }
+
+  /// Get intrinsic cost based on argument types.
+  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
+  /// cost of scalarizing the arguments and the return value will be computed
+  /// based on types.
+  unsigned getIntrinsicInstrCost(
+      Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
+      unsigned ScalarizationCostPassed = std::numeric_limits<unsigned>::max()) {
+    SmallVector<unsigned, 2> ISDs;
+    unsigned SingleCallCost = 10; // Library call cost. Make it expensive.
+    switch (IID) {
+    default: {
+      // Assume that we need to scalarize this intrinsic.
+      unsigned ScalarizationCost = ScalarizationCostPassed;
+      unsigned ScalarCalls = 1;
+      Type *ScalarRetTy = RetTy;
+      if (RetTy->isVectorTy()) {
+        if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+          ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+        ScalarRetTy = RetTy->getScalarType();
+      }
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy()) {
+          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+            ScalarizationCost += getScalarizationOverhead(Ty, false, true);
+          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+          Ty = Ty->getScalarType();
+        }
+        ScalarTys.push_back(Ty);
+      }
+      if (ScalarCalls == 1)
+        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
+
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, ScalarRetTy, ScalarTys, FMF);
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
+    }
+    // Look for intrinsics that can be lowered directly or turned into a scalar
+    // intrinsic call.
+    case Intrinsic::sqrt:
+      ISDs.push_back(ISD::FSQRT);
+      break;
+    case Intrinsic::sin:
+      ISDs.push_back(ISD::FSIN);
+      break;
+    case Intrinsic::cos:
+      ISDs.push_back(ISD::FCOS);
+      break;
+    case Intrinsic::exp:
+      ISDs.push_back(ISD::FEXP);
+      break;
+    case Intrinsic::exp2:
+      ISDs.push_back(ISD::FEXP2);
+      break;
+    case Intrinsic::log:
+      ISDs.push_back(ISD::FLOG);
+      break;
+    case Intrinsic::log10:
+      ISDs.push_back(ISD::FLOG10);
+      break;
+    case Intrinsic::log2:
+      ISDs.push_back(ISD::FLOG2);
+      break;
+    case Intrinsic::fabs:
+      ISDs.push_back(ISD::FABS);
+      break;
+    case Intrinsic::minnum:
+      ISDs.push_back(ISD::FMINNUM);
+      if (FMF.noNaNs())
+        ISDs.push_back(ISD::FMINNAN);
+      break;
+    case Intrinsic::maxnum:
+      ISDs.push_back(ISD::FMAXNUM);
+      if (FMF.noNaNs())
+        ISDs.push_back(ISD::FMAXNAN);
+      break;
+    case Intrinsic::copysign:
+      ISDs.push_back(ISD::FCOPYSIGN);
+      break;
+    case Intrinsic::floor:
+      ISDs.push_back(ISD::FFLOOR);
+      break;
+    case Intrinsic::ceil:
+      ISDs.push_back(ISD::FCEIL);
+      break;
+    case Intrinsic::trunc:
+      ISDs.push_back(ISD::FTRUNC);
+      break;
+    case Intrinsic::nearbyint:
+      ISDs.push_back(ISD::FNEARBYINT);
+      break;
+    case Intrinsic::rint:
+      ISDs.push_back(ISD::FRINT);
+      break;
+    case Intrinsic::round:
+      ISDs.push_back(ISD::FROUND);
+      break;
+    case Intrinsic::pow:
+      ISDs.push_back(ISD::FPOW);
+      break;
+    case Intrinsic::fma:
+      ISDs.push_back(ISD::FMA);
+      break;
+    case Intrinsic::fmuladd:
+      ISDs.push_back(ISD::FMA);
+      break;
+    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+    case Intrinsic::sideeffect:
+      return 0;
+    case Intrinsic::masked_store:
+      return static_cast<T *>(this)
+          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
+    case Intrinsic::masked_load:
+      return static_cast<T *>(this)
+          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
+    case Intrinsic::experimental_vector_reduce_add:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Add, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_mul:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Mul, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_and:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::And, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_or:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Or, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_xor:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::Xor, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_fadd:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::FAdd, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_fmul:
+      return static_cast<T *>(this)->getArithmeticReductionCost(
+          Instruction::FMul, Tys[0], /*IsPairwiseForm=*/false);
+    case Intrinsic::experimental_vector_reduce_smax:
+    case Intrinsic::experimental_vector_reduce_smin:
+    case Intrinsic::experimental_vector_reduce_fmax:
+    case Intrinsic::experimental_vector_reduce_fmin:
+      return static_cast<T *>(this)->getMinMaxReductionCost(
+          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
+          /*IsSigned=*/true);
+    case Intrinsic::experimental_vector_reduce_umax:
+    case Intrinsic::experimental_vector_reduce_umin:
+      return static_cast<T *>(this)->getMinMaxReductionCost(
+          Tys[0], CmpInst::makeCmpResultType(Tys[0]), /*IsPairwiseForm=*/false,
+          /*IsSigned=*/false);
+    case Intrinsic::ctpop:
+      ISDs.push_back(ISD::CTPOP);
+      // In case of legalization use TCC_Expensive. This is cheaper than a
+      // library call but still not a cheap instruction.
+      SingleCallCost = TargetTransformInfo::TCC_Expensive;
+      break;
+    // FIXME: ctlz, cttz, ...
+    }
+
+    const TargetLoweringBase *TLI = getTLI();
+    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+
+    SmallVector<unsigned, 2> LegalCost;
+    SmallVector<unsigned, 2> CustomCost;
+    for (unsigned ISD : ISDs) {
+      if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+        if (IID == Intrinsic::fabs && TLI->isFAbsFree(LT.second)) {
+          return 0;
+        }
+
+        // The operation is legal. Assume it costs 1.
+        // If the type is split to multiple registers, assume that there is some
+        // overhead to this.
+        // TODO: Once we have extract/insert subvector cost we need to use them.
+        if (LT.first > 1)
+          LegalCost.push_back(LT.first * 2);
+        else
+          LegalCost.push_back(LT.first * 1);
+      } else if (!TLI->isOperationExpand(ISD, LT.second)) {
+        // If the operation is custom lowered then assume
+        // that the code is twice as expensive.
+        CustomCost.push_back(LT.first * 2);
+      }
+    }
+
+    auto MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
+    if (MinLegalCostI != LegalCost.end())
+      return *MinLegalCostI;
+
+    auto MinCustomCostI = std::min_element(CustomCost.begin(), CustomCost.end());
+    if (MinCustomCostI != CustomCost.end())
+      return *MinCustomCostI;
+
+    // If we can't lower fmuladd into an FMA, estimate the cost as a
+    // floating-point mul followed by an add.
+    if (IID == Intrinsic::fmuladd)
+      return static_cast<T *>(this)
+                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
+             static_cast<T *>(this)
+                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
+
+    // Else, assume that we need to scalarize this intrinsic. For math builtins
+    // this will emit a costly libcall, adding call overhead and spills. Make it
+    // very expensive.
+    if (RetTy->isVectorTy()) {
+      unsigned ScalarizationCost =
+          ((ScalarizationCostPassed != std::numeric_limits<unsigned>::max())
+               ? ScalarizationCostPassed
+               : getScalarizationOverhead(RetTy, true, false));
+      unsigned ScalarCalls = RetTy->getVectorNumElements();
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy())
+          Ty = Ty->getScalarType();
+        ScalarTys.push_back(Ty);
+      }
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, RetTy->getScalarType(), ScalarTys, FMF);
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        if (Tys[i]->isVectorTy()) {
+          if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
+            ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
+          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        }
+      }
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
+    }
+
+    // This is going to be turned into a library call, make it expensive.
+    return SingleCallCost;
+  }
+
+  /// \brief Compute a cost of the given call instruction.
+  ///
+  /// Compute the cost of calling function F with return type RetTy and
+  /// argument types Tys. F might be nullptr, in this case the cost of an
+  /// arbitrary call with the specified signature will be returned.
+  /// This is used, for instance, when we estimate a call of a vector
+  /// counterpart of the given function.
+  /// \param F Called function, might be nullptr.
+  /// \param RetTy Return value type.
+  /// \param Tys Argument types.
+  /// \returns The cost of Call instruction.
+  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
+    return 10;
+  }
+
+  unsigned getNumberOfParts(Type *Tp) {
+    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
+    return LT.first;
+  }
+
+  unsigned getAddressComputationCost(Type *Ty, ScalarEvolution *,
+                                     const SCEV *) {
+    return 0;
+  }
+
+  /// Try to calculate arithmetic and shuffle op costs for reduction operations.
+  /// We assume that reduction operations are performed in the following way:
+  /// 1. Non-pairwise reduction
+  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n, i32 undef, ..., i32 undef>
+  ///            \----------------v-------------/  \----------v------------/
+  ///                            n/2 elements               n/2 elements
+  /// %red1 = op <n x t> %val, <n x t> %val1
+  /// After this operation we have a vector %red1 where only the first n/2
+  /// elements are meaningful, the second n/2 elements are undefined and can be
+  /// dropped. All other operations are actually working with the vector of
+  /// length n/2, not n, though the real vector length is still n.
+  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
+  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2, i32 undef, ..., i32 undef>
+  ///            \----------------v-------------/  \----------v------------/
+  ///                            n/4 elements               3*n/4 elements
+  /// %red2 = op <n x t> %red1, <n x t> %val2  - working with the vector of
+  /// length n/2, the resulting vector has length n/4 etc.
+  /// 2. Pairwise reduction:
+  /// Everything is the same except for an additional shuffle operation which
+  /// is used to produce operands for pairwise kind of reductions.
+  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 0, i32 2, ..., i32 n-2, i32 undef, ..., i32 undef>
+  ///            \-------------v----------/  \----------v------------/
+  ///                   n/2 elements               n/2 elements
+  /// %val2 = shufflevector<n x t> %val, <n x t> %undef,
+  /// <n x i32> <i32 1, i32 3, ..., i32 n-1, i32 undef, ..., i32 undef>
+  ///            \-------------v----------/  \----------v------------/
+  ///                   n/2 elements               n/2 elements
+  /// %red1 = op <n x t> %val1, <n x t> %val2
+  /// Again, the operation is performed on an <n x t> vector, but the
+  /// resulting vector %red1 is an <n/2 x t> vector.
+  ///
+  /// The cost model should take into account that the actual length of the
+  /// vector is reduced on each iteration.
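+  ///
+  /// As a worked sketch (assuming the widest legal type is v4i32): a
+  /// non-pairwise add reduction of <8 x i32> has Log2_32(8) == 3 levels; the
+  /// first shuffle/add pair runs while the vector is still longer than the
+  /// legal length, the remaining two run on <4 x i32>, and a final
+  /// extraction overhead is added for moving the result out of the vector.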
+  unsigned getArithmeticReductionCost(unsigned Opcode, Type *Ty,
+                                      bool IsPairwise) {
+    assert(Ty->isVectorTy() && "Expect a vector type");
+    Type *ScalarTy = Ty->getVectorElementType();
+    unsigned NumVecElts = Ty->getVectorNumElements();
+    unsigned NumReduxLevels = Log2_32(NumVecElts);
+    unsigned ArithCost = 0;
+    unsigned ShuffleCost = 0;
+    auto *ConcreteTTI = static_cast<T *>(this);
+    std::pair<unsigned, MVT> LT =
+        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
+    unsigned LongVectorCount = 0;
+    unsigned MVTLen =
+        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
+    while (NumVecElts > MVTLen) {
+      NumVecElts /= 2;
+      // Assume the pairwise shuffles add a cost.
+      ShuffleCost += (IsPairwise + 1) *
+                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                                 NumVecElts, Ty);
+      ArithCost += ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
+      Ty = VectorType::get(ScalarTy, NumVecElts);
+      ++LongVectorCount;
+    }
+    // The minimal length of the vector is limited by the real length of vector
+    // operations performed on the current platform. That's why several final
+    // reduction operations are performed on the vectors with the same
+    // architecture-dependent length.
+    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
+                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                               NumVecElts, Ty);
+    ArithCost += (NumReduxLevels - LongVectorCount) *
+                 ConcreteTTI->getArithmeticInstrCost(Opcode, Ty);
+    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
+  }
+
+  /// Try to calculate op costs for min/max reduction operations.
+  /// \param CondTy Conditional type for the Select instruction.
+  unsigned getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise,
+                                  bool) {
+    assert(Ty->isVectorTy() && "Expect a vector type");
+    Type *ScalarTy = Ty->getVectorElementType();
+    Type *ScalarCondTy = CondTy->getVectorElementType();
+    unsigned NumVecElts = Ty->getVectorNumElements();
+    unsigned NumReduxLevels = Log2_32(NumVecElts);
+    unsigned CmpOpcode;
+    if (Ty->isFPOrFPVectorTy()) {
+      CmpOpcode = Instruction::FCmp;
+    } else {
+      assert(Ty->isIntOrIntVectorTy() &&
+             "expecting floating point or integer type for min/max reduction");
+      CmpOpcode = Instruction::ICmp;
+    }
+    unsigned MinMaxCost = 0;
+    unsigned ShuffleCost = 0;
+    auto *ConcreteTTI = static_cast<T *>(this);
+    std::pair<unsigned, MVT> LT =
+        ConcreteTTI->getTLI()->getTypeLegalizationCost(DL, Ty);
+    unsigned LongVectorCount = 0;
+    unsigned MVTLen =
+        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
+    while (NumVecElts > MVTLen) {
+      NumVecElts /= 2;
+      // Assume the pairwise shuffles add a cost.
+      ShuffleCost += (IsPairwise + 1) *
+                     ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                                 NumVecElts, Ty);
+      MinMaxCost +=
+          ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
+          ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
+                                          nullptr);
+      Ty = VectorType::get(ScalarTy, NumVecElts);
+      CondTy = VectorType::get(ScalarCondTy, NumVecElts);
+      ++LongVectorCount;
+    }
+    // The minimal length of the vector is limited by the real length of vector
+    // operations performed on the current platform. That's why several final
+    // reduction operations are performed on the vectors with the same
+    // architecture-dependent length.
+    ShuffleCost += (NumReduxLevels - LongVectorCount) * (IsPairwise + 1) *
+                   ConcreteTTI->getShuffleCost(TTI::SK_ExtractSubvector, Ty,
+                                               NumVecElts, Ty);
+    MinMaxCost +=
+        (NumReduxLevels - LongVectorCount) *
+        (ConcreteTTI->getCmpSelInstrCost(CmpOpcode, Ty, CondTy, nullptr) +
+         ConcreteTTI->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
+                                         nullptr));
+    // Need 3 extractelement instructions for scalarization + an additional
+    // scalar select instruction.
+    return ShuffleCost + MinMaxCost +
+           3 * getScalarizationOverhead(Ty, /*Insert=*/false,
+                                        /*Extract=*/true) +
+           ConcreteTTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
+                                           ScalarCondTy, nullptr);
+  }
+
+  unsigned getVectorSplitCost() { return 1; }
+
+  /// @}
+};
+
+/// \brief Concrete BasicTTIImpl that can be used if no further customization
+/// is needed.
+class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
+  using BaseT = BasicTTIImplBase<BasicTTIImpl>;
+
+  friend class BasicTTIImplBase<BasicTTIImpl>;
+
+  const TargetSubtargetInfo *ST;
+  const TargetLoweringBase *TLI;
+
+  const TargetSubtargetInfo *getST() const { return ST; }
+  const TargetLoweringBase *getTLI() const { return TLI; }
+
+public:
+  explicit BasicTTIImpl(const TargetMachine *ST, const Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_BASICTTIIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
new file mode 100644
index 0000000..d9e8206
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CalcSpillWeights.h
@@ -0,0 +1,108 @@
+//===- lib/CodeGen/CalcSpillWeights.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+namespace llvm {
+
+class LiveInterval;
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineFunction;
+class MachineLoopInfo;
+class VirtRegMap;
+
+  /// \brief Normalize the spill weight of a live interval
+  ///
+  /// The spill weight of a live interval is computed as:
+  ///
+  ///   (sum(use freq) + sum(def freq)) / (K + size)
+  ///
+  /// @param UseDefFreq Expected number of executed use and def instructions
+  ///                   per function call. Derived from block frequencies.
+  /// @param Size       Size of live interval as returned by getSize()
+  /// @param NumInstr   Number of instructions using this live interval
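+  ///
+  /// A numeric sketch (values are illustrative): with the constant of 25
+  /// instructions below, an interval covering 5 instructions with
+  /// UseDefFreq == 10 normalizes to 10 / (30 * SlotIndex::InstrDist), while
+  /// an interval covering 500 instructions needs a UseDefFreq of about 175
+  /// to reach the same weight; short intervals are thus weighted by use
+  /// count and long ones by use density.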
+  static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size,
+                                           unsigned NumInstr) {
+    // The constant 25 instructions is added to avoid depending too much on
+    // accidental SlotIndex gaps for small intervals. The effect is that small
+    // intervals have a spill weight that is mostly proportional to the number
+    // of uses, while large intervals get a spill weight that is closer to a use
+    // density.
+    return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
+  }
+
+  /// \brief Calculate auxiliary information for a virtual register such as its
+  /// spill weight and allocation hint.
+  class VirtRegAuxInfo {
+  public:
+    using NormalizingFn = float (*)(float, unsigned, unsigned);
+
+  private:
+    MachineFunction &MF;
+    LiveIntervals &LIS;
+    VirtRegMap *VRM;
+    const MachineLoopInfo &Loops;
+    const MachineBlockFrequencyInfo &MBFI;
+    DenseMap<unsigned, float> Hint;
+    NormalizingFn normalize;
+
+  public:
+    VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
+                   VirtRegMap *vrm, const MachineLoopInfo &loops,
+                   const MachineBlockFrequencyInfo &mbfi,
+                   NormalizingFn norm = normalizeSpillWeight)
+        : MF(mf), LIS(lis), VRM(vrm), Loops(loops), MBFI(mbfi), normalize(norm) {}
+
+    /// \brief (re)compute li's spill weight and allocation hint.
+    void calculateSpillWeightAndHint(LiveInterval &li);
+
+    /// \brief Compute future expected spill weight of a split artifact of li
+    /// that will span between start and end slot indexes.
+    /// \param li     The live interval to be split.
+    /// \param start  The expected beginning of the split artifact. Instructions
+    ///               before start will not affect the weight.
+    /// \param end    The expected end of the split artifact. Instructions
+    ///               after end will not affect the weight.
+    /// \return The expected spill weight of the split artifact. Returns
+    /// negative weight for unspillable li.
+    float futureWeight(LiveInterval &li, SlotIndex start, SlotIndex end);
+
+    /// \brief Helper function for weight calculations.
+    /// (Re)compute li's spill weight and allocation hint, or, for non null
+    /// start and end - compute future expected spill weight of a split
+    /// artifact of li that will span between start and end slot indexes.
+    /// \param li     The live interval for which to compute the weight.
+    /// \param start  The expected beginning of the split artifact. Instructions
+    ///               before start will not affect the weight. Relevant for
+    ///               weight calculation of future split artifact.
+    /// \param end    The expected end of the split artifact. Instructions
+    ///               after end will not affect the weight. Relevant for
+    ///               weight calculation of future split artifact.
+    /// \return The spill weight. Returns negative weight for unspillable li.
+    float weightCalcHelper(LiveInterval &li, SlotIndex *start = nullptr,
+                           SlotIndex *end = nullptr);
+  };
+
+  /// \brief Compute spill weights and allocation hints for all virtual register
+  /// live intervals.
+  void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
+                                     VirtRegMap *VRM,
+                                     const MachineLoopInfo &MLI,
+                                     const MachineBlockFrequencyInfo &MBFI,
+                                     VirtRegAuxInfo::NormalizingFn norm =
+                                         normalizeSpillWeight);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
new file mode 100644
index 0000000..d30a273
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CallingConvLower.h
@@ -0,0 +1,576 @@
+//===- llvm/CallingConvLower.h - Calling Conventions ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CCState and CCValAssign classes, used for lowering
+// and implementing calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCRegisterInfo.h"
+
+namespace llvm {
+
+class CCState;
+class MVT;
+class TargetMachine;
+class TargetRegisterInfo;
+
+/// CCValAssign - Represent assignment of one arg/retval to a location.
+class CCValAssign {
+public:
+  enum LocInfo {
+    Full,      // The value fills the full location.
+    SExt,      // The value is sign extended in the location.
+    ZExt,      // The value is zero extended in the location.
+    AExt,      // The value is extended with undefined upper bits.
+    SExtUpper, // The value is in the upper bits of the location and should be
+               // sign extended when retrieved.
+    ZExtUpper, // The value is in the upper bits of the location and should be
+               // zero extended when retrieved.
+    AExtUpper, // The value is in the upper bits of the location and should be
+               // extended with undefined upper bits when retrieved.
+    BCvt,      // The value is bit-converted in the location.
+    VExt,      // The value is vector-widened in the location.
+               // FIXME: Not implemented yet. Code that uses AExt to mean
+               // vector-widen should be fixed to use VExt instead.
+    FPExt,     // The floating-point value is fp-extended in the location.
+    Indirect   // The location contains pointer to the value.
+    // TODO: a subset of the value is in the location.
+  };
+
+private:
+  /// ValNo - This is the value number being assigned (e.g. an argument number).
+  unsigned ValNo;
+
+  /// Loc is either a stack offset or a register number.
+  unsigned Loc;
+
+  /// isMem - True if this is a memory loc, false if it is a register loc.
+  unsigned isMem : 1;
+
+  /// isCustom - True if this arg/retval requires special handling.
+  unsigned isCustom : 1;
+
+  /// Information about how the value is assigned.
+  LocInfo HTP : 6;
+
+  /// ValVT - The type of the value being assigned.
+  MVT ValVT;
+
+  /// LocVT - The type of the location being assigned to.
+  MVT LocVT;
+public:
+
+  static CCValAssign getReg(unsigned ValNo, MVT ValVT,
+                            unsigned RegNo, MVT LocVT,
+                            LocInfo HTP) {
+    CCValAssign Ret;
+    Ret.ValNo = ValNo;
+    Ret.Loc = RegNo;
+    Ret.isMem = false;
+    Ret.isCustom = false;
+    Ret.HTP = HTP;
+    Ret.ValVT = ValVT;
+    Ret.LocVT = LocVT;
+    return Ret;
+  }
+
+  static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
+                                  unsigned RegNo, MVT LocVT,
+                                  LocInfo HTP) {
+    CCValAssign Ret;
+    Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
+    Ret.isCustom = true;
+    return Ret;
+  }
+
+  static CCValAssign getMem(unsigned ValNo, MVT ValVT,
+                            unsigned Offset, MVT LocVT,
+                            LocInfo HTP) {
+    CCValAssign Ret;
+    Ret.ValNo = ValNo;
+    Ret.Loc = Offset;
+    Ret.isMem = true;
+    Ret.isCustom = false;
+    Ret.HTP = HTP;
+    Ret.ValVT = ValVT;
+    Ret.LocVT = LocVT;
+    return Ret;
+  }
+
+  static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
+                                  unsigned Offset, MVT LocVT,
+                                  LocInfo HTP) {
+    CCValAssign Ret;
+    Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
+    Ret.isCustom = true;
+    return Ret;
+  }
+
+  // There is no need to differentiate between a pending CCValAssign and other
+  // kinds, as they are stored in a different list.
+  static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
+                                LocInfo HTP, unsigned ExtraInfo = 0) {
+    return getReg(ValNo, ValVT, ExtraInfo, LocVT, HTP);
+  }
+
+  void convertToReg(unsigned RegNo) {
+    Loc = RegNo;
+    isMem = false;
+  }
+
+  void convertToMem(unsigned Offset) {
+    Loc = Offset;
+    isMem = true;
+  }
+
+  unsigned getValNo() const { return ValNo; }
+  MVT getValVT() const { return ValVT; }
+
+  bool isRegLoc() const { return !isMem; }
+  bool isMemLoc() const { return isMem; }
+
+  bool needsCustom() const { return isCustom; }
+
+  unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+  unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
+  unsigned getExtraInfo() const { return Loc; }
+  MVT getLocVT() const { return LocVT; }
+
+  LocInfo getLocInfo() const { return HTP; }
+  bool isExtInLoc() const {
+    return (HTP == AExt || HTP == SExt || HTP == ZExt);
+  }
+
+  bool isUpperBitsInLoc() const {
+    return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
+  }
+};
+
+/// Describes a register that needs to be forwarded from the prologue to a
+/// musttail call.
+struct ForwardedRegister {
+  ForwardedRegister(unsigned VReg, MCPhysReg PReg, MVT VT)
+      : VReg(VReg), PReg(PReg), VT(VT) {}
+  unsigned VReg;
+  MCPhysReg PReg;
+  MVT VT;
+};
+
+/// CCAssignFn - This function assigns a location for Val, updating State to
+/// reflect the change.  It returns 'true' if it failed to handle Val.
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
+                        MVT LocVT, CCValAssign::LocInfo LocInfo,
+                        ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+/// CCCustomFn - This function assigns a location for Val, possibly updating
+/// all args to reflect changes, and indicates whether it handled Val. It must
+/// set isCustom if it handles the arg, and it returns true in that case.
+typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
+                        MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+                        ISD::ArgFlagsTy &ArgFlags, CCState &State);
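+
+// A minimal CCAssignFn sketch, assuming a hypothetical register table
+// "RegList"; real targets typically generate these functions from
+// CallingConv.td via TableGen:
+//
+//   static bool CC_Example(unsigned ValNo, MVT ValVT, MVT LocVT,
+//                          CCValAssign::LocInfo LocInfo,
+//                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
+//     static const MCPhysReg RegList[] = { /* hypothetical registers */ };
+//     if (unsigned Reg = State.AllocateReg(RegList)) {
+//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+//       return false; // Handled in a register.
+//     }
+//     unsigned Offset = State.AllocateStack(4, 4);
+//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+//     return false; // Handled on the stack.
+//   }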
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values.  It captures which registers are already assigned and which
+/// stack slots are used.  It provides accessors to allocate these values.
+class CCState {
+private:
+  CallingConv::ID CallingConv;
+  bool IsVarArg;
+  bool AnalyzingMustTailForwardedRegs = false;
+  MachineFunction &MF;
+  const TargetRegisterInfo &TRI;
+  SmallVectorImpl<CCValAssign> &Locs;
+  LLVMContext &Context;
+
+  unsigned StackOffset;
+  unsigned MaxStackArgAlign;
+  SmallVector<uint32_t, 16> UsedRegs;
+  SmallVector<CCValAssign, 4> PendingLocs;
+  SmallVector<ISD::ArgFlagsTy, 4> PendingArgFlags;
+
+  // ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
+  //
+  // Vector of ByValInfo instances (ByValRegs) is introduced for byval registers
+  // tracking.
+  // Or, in other words, it tracks byval parameters that are stored in
+  // general purpose registers.
+  //
+  // For 4-byte stack alignment,
+  // the instance index is the byval parameter number within the formal
+  // argument set. Assume we have some "struct_type" with size = 4 bytes;
+  // then, for function "foo":
+  //
+  // i32 foo(i32 %p, %struct_type* %r, i32 %s, %struct_type* %t)
+  //
+  // ByValRegs[0] describes how "%r" is stored (Begin == r1, End == r2)
+  // ByValRegs[1] describes how "%t" is stored (Begin == r3, End == r4).
+  //
+  // In the case of 8-byte stack alignment,
+  // ByValRegs may also contain information about wasted registers.
+  // In the function shown above, r3 would be wasted according to AAPCS rules.
+  // In that case ByValRegs[1].Waste would be "true".
+  // The ByValRegs vector size would still be 2,
+  // while "%t" goes to the stack: it wouldn't be described in ByValRegs.
+  //
+  // Supposed use-case for this collection:
+  // 1. Initially ByValRegs is empty, InRegsParamsProcessed is 0.
+  // 2. HandleByVal fills up ByValRegs.
+  // 3. Argument analysis (LowerFormalArguments, for example). After
+  // a byval argument has been analyzed, InRegsParamsProcessed is increased.
+  struct ByValInfo {
+    ByValInfo(unsigned B, unsigned E, bool IsWaste = false) :
+      Begin(B), End(E), Waste(IsWaste) {}
+    // First register allocated for current parameter.
+    unsigned Begin;
+
+    // First after last register allocated for current parameter.
+    unsigned End;
+
+    // Means that current range of registers doesn't belong to any
+    // parameters. It was wasted due to stack alignment rules.
+    // For more information see:
+    // AAPCS, 5.5 Parameter Passing, Stage C, C.3.
+    bool Waste;
+  };
+  SmallVector<ByValInfo, 4> ByValRegs;
+
+  // InRegsParamsProcessed - shows how many instances of ByValRegs have been
+  // processed during argument analysis.
+  unsigned InRegsParamsProcessed;
+
+public:
+  CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+          SmallVectorImpl<CCValAssign> &locs, LLVMContext &C);
+
+  void addLoc(const CCValAssign &V) {
+    Locs.push_back(V);
+  }
+
+  LLVMContext &getContext() const { return Context; }
+  MachineFunction &getMachineFunction() const { return MF; }
+  CallingConv::ID getCallingConv() const { return CallingConv; }
+  bool isVarArg() const { return IsVarArg; }
+
+  /// getNextStackOffset - Return the next stack offset such that all stack
+  /// slots satisfy their alignment requirements.
+  unsigned getNextStackOffset() const {
+    return StackOffset;
+  }
+
+  /// getAlignedCallFrameSize - Return the size of the call frame needed to
+  /// store all arguments such that the alignment requirement of each of the
+  /// arguments is satisfied.
+  unsigned getAlignedCallFrameSize() const {
+    return alignTo(StackOffset, MaxStackArgAlign);
+  }
+
+  /// isAllocated - Return true if the specified register (or an alias) is
+  /// allocated.
+  bool isAllocated(unsigned Reg) const {
+    return UsedRegs[Reg/32] & (1 << (Reg&31));
+  }
+
+  /// AnalyzeFormalArguments - Analyze an array of argument values,
+  /// incorporating info about the formals into this state.
+  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+                              CCAssignFn Fn);
+
+  /// The function will invoke AnalyzeFormalArguments.
+  void AnalyzeArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+                        CCAssignFn Fn) {
+    AnalyzeFormalArguments(Ins, Fn);
+  }
+
+  /// AnalyzeReturn - Analyze the returned values of a return,
+  /// incorporating info about the result values into this state.
+  void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                     CCAssignFn Fn);
+
+  /// CheckReturn - Analyze the return values of a function, returning
+  /// true if the return can be performed without sret-demotion, and
+  /// false otherwise.
+  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+                   CCAssignFn Fn);
+
+  /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+  /// incorporating info about the passed values into this state.
+  void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                           CCAssignFn Fn);
+
+  /// AnalyzeCallOperands - Same as above except it takes vectors of types
+  /// and argument flags.
+  void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
+                           SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+                           CCAssignFn Fn);
+
+  /// The function will invoke AnalyzeCallOperands.
+  void AnalyzeArguments(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        CCAssignFn Fn) {
+    AnalyzeCallOperands(Outs, Fn);
+  }
+
+  /// AnalyzeCallResult - Analyze the return values of a call,
+  /// incorporating info about the passed values into this state.
+  void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+                         CCAssignFn Fn);
+
+  /// A shadow allocated register is a register that was allocated
+  /// but wasn't added to the location list (Locs).
+  /// \returns true if the register was allocated as shadow or false otherwise.
+  bool IsShadowAllocatedReg(unsigned Reg) const;
+
+  /// AnalyzeCallResult - Same as above except it's specialized for calls which
+  /// produce a single value.
+  void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+
+  /// getFirstUnallocated - Return the index of the first unallocated register
+  /// in the set, or Regs.size() if they are all allocated.
+  unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
+    for (unsigned i = 0; i < Regs.size(); ++i)
+      if (!isAllocated(Regs[i]))
+        return i;
+    return Regs.size();
+  }
+
+  /// AllocateReg - Attempt to allocate one register.  If it is not available,
+  /// return zero.  Otherwise, return the register, marking it and any aliases
+  /// as allocated.
+  unsigned AllocateReg(unsigned Reg) {
+    if (isAllocated(Reg)) return 0;
+    MarkAllocated(Reg);
+    return Reg;
+  }
+
+  /// Version of AllocateReg with extra register to be shadowed.
+  unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+    if (isAllocated(Reg)) return 0;
+    MarkAllocated(Reg);
+    MarkAllocated(ShadowReg);
+    return Reg;
+  }
+
+  /// AllocateReg - Attempt to allocate one of the specified registers.  If none
+  /// are available, return zero.  Otherwise, return the first one available,
+  /// marking it and any aliases as allocated.
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
+      return 0;    // Didn't find the reg.
+
+    // Mark the register and any aliases as allocated.
+    unsigned Reg = Regs[FirstUnalloc];
+    MarkAllocated(Reg);
+    return Reg;
+  }
+
+  /// AllocateRegBlock - Attempt to allocate a block of RegsRequired consecutive
+  /// registers. If this is not possible, return zero. Otherwise, return the
+  /// first register of the block that was allocated, marking the entire block
+  /// as allocated.
+  unsigned AllocateRegBlock(ArrayRef<MCPhysReg> Regs, unsigned RegsRequired) {
+    if (RegsRequired > Regs.size())
+      return 0;
+
+    for (unsigned StartIdx = 0; StartIdx <= Regs.size() - RegsRequired;
+         ++StartIdx) {
+      bool BlockAvailable = true;
+      // Check for already-allocated regs in this block
+      for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+        if (isAllocated(Regs[StartIdx + BlockIdx])) {
+          BlockAvailable = false;
+          break;
+        }
+      }
+      if (BlockAvailable) {
+        // Mark the entire block as allocated
+        for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+          MarkAllocated(Regs[StartIdx + BlockIdx]);
+        }
+        return Regs[StartIdx];
+      }
+    }
+    // No block was available
+    return 0;
+  }
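+
+  // Usage sketch (illustrative): a target passing a 64-bit value in two
+  // consecutive 32-bit registers from a register table Regs might do:
+  //   if (unsigned FirstReg = CCInfo.AllocateRegBlock(Regs, 2))
+  //     ...; // FirstReg and the following register in Regs are now allocated.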
+
+  /// Version of AllocateReg with list of registers to be shadowed.
+  unsigned AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
+    unsigned FirstUnalloc = getFirstUnallocated(Regs);
+    if (FirstUnalloc == Regs.size())
+      return 0;    // Didn't find the reg.
+
+    // Mark the register and any aliases as allocated.
+    unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+    MarkAllocated(Reg);
+    MarkAllocated(ShadowReg);
+    return Reg;
+  }
+
+  /// AllocateStack - Allocate a chunk of stack space with the specified size
+  /// and alignment.
+  unsigned AllocateStack(unsigned Size, unsigned Align) {
+    assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
+    StackOffset = alignTo(StackOffset, Align);
+    unsigned Result = StackOffset;
+    StackOffset += Size;
+    MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
+    ensureMaxAlignment(Align);
+    return Result;
+  }
+
+  void ensureMaxAlignment(unsigned Align) {
+    if (!AnalyzingMustTailForwardedRegs)
+      MF.getFrameInfo().ensureMaxAlignment(Align);
+  }
+
+  /// Version of AllocateStack with extra register to be shadowed.
+  unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
+    MarkAllocated(ShadowReg);
+    return AllocateStack(Size, Align);
+  }
+
+  /// Version of AllocateStack with list of extra registers to be shadowed.
+  /// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
+  unsigned AllocateStack(unsigned Size, unsigned Align,
+                         ArrayRef<MCPhysReg> ShadowRegs) {
+    for (unsigned i = 0; i < ShadowRegs.size(); ++i)
+      MarkAllocated(ShadowRegs[i]);
+    return AllocateStack(Size, Align);
+  }
+
+  // HandleByVal - Allocate a stack slot large enough to pass an argument by
+  // value. The size and alignment information of the argument is encoded in its
+  // parameter attribute.
+  void HandleByVal(unsigned ValNo, MVT ValVT,
+                   MVT LocVT, CCValAssign::LocInfo LocInfo,
+                   int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+  // Returns the count of byval arguments that are to be stored (even partly)
+  // in registers.
+  unsigned getInRegsParamsCount() const { return ByValRegs.size(); }
+
+  // Returns the count of byval in-regs arguments processed so far.
+  unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }
+
+  // Get information about the N-th byval parameter that is stored in
+  // registers. Here "InRegsParamRecordIndex" is N.
+  void getInRegsParamInfo(unsigned InRegsParamRecordIndex,
+                          unsigned& BeginReg, unsigned& EndReg) const {
+    assert(InRegsParamRecordIndex < ByValRegs.size() &&
+           "Wrong ByVal parameter index");
+
+    const ByValInfo& info = ByValRegs[InRegsParamRecordIndex];
+    BeginReg = info.Begin;
+    EndReg = info.End;
+  }
+
+  // Add information about a parameter that is kept in registers.
+  void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd) {
+    ByValRegs.push_back(ByValInfo(RegBegin, RegEnd));
+  }
+
+  // Advances either to the next byval parameter (excluding "waste" records),
+  // or to the end of the collection.
+  // Returns false if the end is reached.
+  bool nextInRegsParam() {
+    unsigned e = ByValRegs.size();
+    if (InRegsParamsProcessed < e)
+      ++InRegsParamsProcessed;
+    return InRegsParamsProcessed < e;
+  }
+
+  // Clear byval registers tracking info.
+  void clearByValRegsInfo() {
+    InRegsParamsProcessed = 0;
+    ByValRegs.clear();
+  }
+
+  // Rewind byval registers tracking info.
+  void rewindByValRegsInfo() {
+    InRegsParamsProcessed = 0;
+  }
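+
+  // Typical traversal sketch (illustrative; CCInfo is a CCState):
+  //   for (unsigned I = 0, E = CCInfo.getInRegsParamsCount(); I != E; ++I) {
+  //     unsigned Begin, End;
+  //     CCInfo.getInRegsParamInfo(I, Begin, End);
+  //     // ... handle registers [Begin, End) for this byval parameter ...
+  //   }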
+
+  // Get list of pending assignments
+  SmallVectorImpl<CCValAssign> &getPendingLocs() {
+    return PendingLocs;
+  }
+
+  // Get a list of argflags for pending assignments.
+  SmallVectorImpl<ISD::ArgFlagsTy> &getPendingArgFlags() {
+    return PendingArgFlags;
+  }
+
+  /// Compute the remaining unused register parameters that would be used for
+  /// the given value type. This is useful when varargs are passed in the
+  /// registers that normal prototyped parameters would be passed in, or for
+  /// implementing perfect forwarding.
+  void getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, MVT VT,
+                                   CCAssignFn Fn);
+
+  /// Compute the set of registers that need to be preserved and forwarded to
+  /// any musttail calls.
+  void analyzeMustTailForwardedRegisters(
+      SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
+      CCAssignFn Fn);
+
+  /// Returns true if the results of the two calling conventions are compatible.
+  /// This is usually part of the check for tailcall eligibility.
+  static bool resultsCompatible(CallingConv::ID CalleeCC,
+                                CallingConv::ID CallerCC, MachineFunction &MF,
+                                LLVMContext &C,
+                                const SmallVectorImpl<ISD::InputArg> &Ins,
+                                CCAssignFn CalleeFn, CCAssignFn CallerFn);
+
+  /// The function runs an additional analysis pass over function arguments.
+  /// It will mark each argument with the attribute flag SecArgPass.
+  /// After running, it will sort the locs list.
+  template <class T>
+  void AnalyzeArgumentsSecondPass(const SmallVectorImpl<T> &Args,
+                                  CCAssignFn Fn) {
+    unsigned NumFirstPassLocs = Locs.size();
+
+    /// Creates an argument list similar to \p Args, in which each argument is
+    /// marked using the SecArgPass flag.
+    SmallVector<T, 16> SecPassArg;
+    for (auto Arg : Args) {
+      Arg.Flags.setSecArgPass();
+      SecPassArg.push_back(Arg);
+    }
+
+    // Run the second argument pass
+    AnalyzeArguments(SecPassArg, Fn);
+
+    // Sort the locations of the arguments according to their original position.
+    SmallVector<CCValAssign, 16> TmpArgLocs;
+    std::swap(TmpArgLocs, Locs);
+    auto B = TmpArgLocs.begin(), E = TmpArgLocs.end();
+    std::merge(B, B + NumFirstPassLocs, B + NumFirstPassLocs, E,
+               std::back_inserter(Locs),
+               [](const CCValAssign &A, const CCValAssign &B) -> bool {
+                 return A.getValNo() < B.getValNo();
+               });
+  }
+
+private:
+  /// MarkAllocated - Mark a register and all of its aliases as allocated.
+  void MarkAllocated(unsigned Reg);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_CALLINGCONVLOWER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
new file mode 100644
index 0000000..3708c04
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CommandFlags.def
@@ -0,0 +1,389 @@
+//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains codegen-specific flags that are shared between different
+// command line tools. The tools "llc" and "opt" both use this file to prevent
+// flag duplication.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCTargetOptionsCommandFlags.def"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <string>
+using namespace llvm;
+
+static cl::opt<std::string>
+    MArch("march",
+          cl::desc("Architecture to generate code for (see --version)"));
+
+static cl::opt<std::string>
+    MCPU("mcpu",
+         cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+         cl::value_desc("cpu-name"), cl::init(""));
+
+static cl::list<std::string>
+    MAttrs("mattr", cl::CommaSeparated,
+           cl::desc("Target specific attributes (-mattr=help for details)"),
+           cl::value_desc("a1,+a2,-a3,..."));
+
+static cl::opt<Reloc::Model> RelocModel(
+    "relocation-model", cl::desc("Choose relocation model"),
+    cl::values(
+        clEnumValN(Reloc::Static, "static", "Non-relocatable code"),
+        clEnumValN(Reloc::PIC_, "pic",
+                   "Fully relocatable, position independent code"),
+        clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+                   "Relocatable external references, non-relocatable code"),
+        clEnumValN(Reloc::ROPI, "ropi",
+                   "Code and read-only data relocatable, accessed PC-relative"),
+        clEnumValN(
+            Reloc::RWPI, "rwpi",
+            "Read-write data relocatable, accessed relative to static base"),
+        clEnumValN(Reloc::ROPI_RWPI, "ropi-rwpi",
+                   "Combination of ropi and rwpi")));
+
+LLVM_ATTRIBUTE_UNUSED static Optional<Reloc::Model> getRelocModel() {
+  if (RelocModel.getNumOccurrences()) {
+    Reloc::Model R = RelocModel;
+    return R;
+  }
+  return None;
+}
+
+static cl::opt<ThreadModel::Model> TMModel(
+    "thread-model", cl::desc("Choose threading model"),
+    cl::init(ThreadModel::POSIX),
+    cl::values(clEnumValN(ThreadModel::POSIX, "posix", "POSIX thread model"),
+               clEnumValN(ThreadModel::Single, "single",
+                          "Single thread model")));
+
+static cl::opt<llvm::CodeModel::Model> CMModel(
+    "code-model", cl::desc("Choose code model"),
+    cl::values(clEnumValN(CodeModel::Small, "small", "Small code model"),
+               clEnumValN(CodeModel::Kernel, "kernel", "Kernel code model"),
+               clEnumValN(CodeModel::Medium, "medium", "Medium code model"),
+               clEnumValN(CodeModel::Large, "large", "Large code model")));
+
+LLVM_ATTRIBUTE_UNUSED static Optional<CodeModel::Model> getCodeModel() {
+  if (CMModel.getNumOccurrences()) {
+    CodeModel::Model M = CMModel;
+    return M;
+  }
+  return None;
+}
+
+static cl::opt<llvm::ExceptionHandling> ExceptionModel(
+    "exception-model", cl::desc("exception model"),
+    cl::init(ExceptionHandling::None),
+    cl::values(
+        clEnumValN(ExceptionHandling::None, "default",
+                   "default exception handling model"),
+        clEnumValN(ExceptionHandling::DwarfCFI, "dwarf",
+                   "DWARF-like CFI based exception handling"),
+        clEnumValN(ExceptionHandling::SjLj, "sjlj", "SjLj exception handling"),
+        clEnumValN(ExceptionHandling::ARM, "arm", "ARM EHABI exceptions"),
+        clEnumValN(ExceptionHandling::WinEH, "wineh",
+                   "Windows exception model"),
+        clEnumValN(ExceptionHandling::Wasm, "wasm",
+                   "WebAssembly exception handling")));
+
+static cl::opt<TargetMachine::CodeGenFileType> FileType(
+    "filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+    cl::desc(
+        "Choose a file type (not all types are supported by all targets):"),
+    cl::values(clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+                          "Emit an assembly ('.s') file"),
+               clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+                          "Emit a native object ('.o') file"),
+               clEnumValN(TargetMachine::CGFT_Null, "null",
+                          "Emit nothing, for performance testing")));
+
+static cl::opt<bool>
+    DisableFPElim("disable-fp-elim",
+                  cl::desc("Disable frame pointer elimination optimization"),
+                  cl::init(false));
+
+static cl::opt<bool> EnableUnsafeFPMath(
+    "enable-unsafe-fp-math",
+    cl::desc("Enable optimizations that may decrease FP precision"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoInfsFPMath(
+    "enable-no-infs-fp-math",
+    cl::desc("Enable FP math optimizations that assume no +-Infs"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoNaNsFPMath(
+    "enable-no-nans-fp-math",
+    cl::desc("Enable FP math optimizations that assume no NaNs"),
+    cl::init(false));
+
+static cl::opt<bool> EnableNoSignedZerosFPMath(
+    "enable-no-signed-zeros-fp-math",
+    cl::desc("Enable FP math optimizations that assume "
+             "the sign of 0 is insignificant"),
+    cl::init(false));
+
+static cl::opt<bool>
+    EnableNoTrappingFPMath("enable-no-trapping-fp-math",
+                           cl::desc("Enable setting the FP exceptions build "
+                                    "attribute not to use exceptions"),
+                           cl::init(false));
+
+static cl::opt<llvm::FPDenormal::DenormalMode> DenormalMode(
+    "denormal-fp-math",
+    cl::desc("Select which denormal numbers the code is permitted to require"),
+    cl::init(FPDenormal::IEEE),
+    cl::values(clEnumValN(FPDenormal::IEEE, "ieee",
+                          "IEEE 754 denormal numbers"),
+               clEnumValN(FPDenormal::PreserveSign, "preserve-sign",
+                          "the sign of a  flushed-to-zero number is preserved "
+                          "in the sign of 0"),
+               clEnumValN(FPDenormal::PositiveZero, "positive-zero",
+                          "denormals are flushed to positive zero")));
+
+static cl::opt<bool> EnableHonorSignDependentRoundingFPMath(
+    "enable-sign-dependent-rounding-fp-math", cl::Hidden,
+    cl::desc("Force codegen to assume rounding mode can change dynamically"),
+    cl::init(false));
+
+static cl::opt<llvm::FloatABI::ABIType> FloatABIForCalls(
+    "float-abi", cl::desc("Choose float ABI type"), cl::init(FloatABI::Default),
+    cl::values(clEnumValN(FloatABI::Default, "default",
+                          "Target default float ABI type"),
+               clEnumValN(FloatABI::Soft, "soft",
+                          "Soft float ABI (implied by -soft-float)"),
+               clEnumValN(FloatABI::Hard, "hard",
+                          "Hard float ABI (uses FP registers)")));
+
+static cl::opt<llvm::FPOpFusion::FPOpFusionMode> FuseFPOps(
+    "fp-contract", cl::desc("Enable aggressive formation of fused FP ops"),
+    cl::init(FPOpFusion::Standard),
+    cl::values(
+        clEnumValN(FPOpFusion::Fast, "fast", "Fuse FP ops whenever profitable"),
+        clEnumValN(FPOpFusion::Standard, "on", "Only fuse 'blessed' FP ops."),
+        clEnumValN(FPOpFusion::Strict, "off",
+                   "Only fuse FP ops when the result won't be affected.")));
+
+static cl::opt<bool> DontPlaceZerosInBSS(
+    "nozero-initialized-in-bss",
+    cl::desc("Don't place zero-initialized symbols into bss section"),
+    cl::init(false));
+
+static cl::opt<bool> EnableGuaranteedTailCallOpt(
+    "tailcallopt",
+    cl::desc(
+        "Turn fastcc calls into tail calls by (potentially) changing ABI."),
+    cl::init(false));
+
+static cl::opt<bool> DisableTailCalls("disable-tail-calls",
+                                      cl::desc("Never emit tail calls"),
+                                      cl::init(false));
+
+static cl::opt<bool> StackSymbolOrdering("stack-symbol-ordering",
+                                         cl::desc("Order local stack symbols."),
+                                         cl::init(true));
+
+static cl::opt<unsigned>
+    OverrideStackAlignment("stack-alignment",
+                           cl::desc("Override default stack alignment"),
+                           cl::init(0));
+
+static cl::opt<bool>
+    StackRealign("stackrealign",
+                 cl::desc("Force align the stack to the minimum alignment"),
+                 cl::init(false));
+
+static cl::opt<std::string> TrapFuncName(
+    "trap-func", cl::Hidden,
+    cl::desc("Emit a call to trap function rather than a trap instruction"),
+    cl::init(""));
+
+static cl::opt<bool> UseCtors("use-ctors",
+                              cl::desc("Use .ctors instead of .init_array."),
+                              cl::init(false));
+
+static cl::opt<bool> RelaxELFRelocations(
+    "relax-elf-relocations",
+    cl::desc("Emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL on x86-64 ELF"),
+    cl::init(false));
+
+static cl::opt<bool> DataSections("data-sections",
+                                  cl::desc("Emit data into separate sections"),
+                                  cl::init(false));
+
+static cl::opt<bool>
+    FunctionSections("function-sections",
+                     cl::desc("Emit functions into separate sections"),
+                     cl::init(false));
+
+static cl::opt<bool> EmulatedTLS("emulated-tls",
+                                 cl::desc("Use emulated TLS model"),
+                                 cl::init(false));
+
+static cl::opt<bool>
+    UniqueSectionNames("unique-section-names",
+                       cl::desc("Give unique names to every section"),
+                       cl::init(true));
+
+static cl::opt<llvm::EABI>
+    EABIVersion("meabi", cl::desc("Set EABI type (default depends on triple):"),
+                cl::init(EABI::Default),
+                cl::values(clEnumValN(EABI::Default, "default",
+                                      "Triple default EABI version"),
+                           clEnumValN(EABI::EABI4, "4", "EABI version 4"),
+                           clEnumValN(EABI::EABI5, "5", "EABI version 5"),
+                           clEnumValN(EABI::GNU, "gnu", "EABI GNU")));
+
+static cl::opt<DebuggerKind> DebuggerTuningOpt(
+    "debugger-tune", cl::desc("Tune debug info for a particular debugger"),
+    cl::init(DebuggerKind::Default),
+    cl::values(clEnumValN(DebuggerKind::GDB, "gdb", "gdb"),
+               clEnumValN(DebuggerKind::LLDB, "lldb", "lldb"),
+               clEnumValN(DebuggerKind::SCE, "sce", "SCE targets (e.g. PS4)")));
+
+static cl::opt<bool> EnableStackSizeSection(
+    "stack-size-section",
+    cl::desc("Emit a section containing stack size metadata"), cl::init(false));
+
+// Common utility function tightly tied to the options listed here. Initializes
+// a TargetOptions object with CodeGen flags and returns it.
+static TargetOptions InitTargetOptionsFromCodeGenFlags() {
+  TargetOptions Options;
+  Options.AllowFPOpFusion = FuseFPOps;
+  Options.UnsafeFPMath = EnableUnsafeFPMath;
+  Options.NoInfsFPMath = EnableNoInfsFPMath;
+  Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+  Options.NoSignedZerosFPMath = EnableNoSignedZerosFPMath;
+  Options.NoTrappingFPMath = EnableNoTrappingFPMath;
+  Options.FPDenormalMode = DenormalMode;
+  Options.HonorSignDependentRoundingFPMathOption =
+      EnableHonorSignDependentRoundingFPMath;
+  if (FloatABIForCalls != FloatABI::Default)
+    Options.FloatABIType = FloatABIForCalls;
+  Options.NoZerosInBSS = DontPlaceZerosInBSS;
+  Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+  Options.StackAlignmentOverride = OverrideStackAlignment;
+  Options.StackSymbolOrdering = StackSymbolOrdering;
+  Options.UseInitArray = !UseCtors;
+  Options.RelaxELFRelocations = RelaxELFRelocations;
+  Options.DataSections = DataSections;
+  Options.FunctionSections = FunctionSections;
+  Options.UniqueSectionNames = UniqueSectionNames;
+  Options.EmulatedTLS = EmulatedTLS;
+  Options.ExplicitEmulatedTLS = EmulatedTLS.getNumOccurrences() > 0;
+  Options.ExceptionModel = ExceptionModel;
+  Options.EmitStackSizeSection = EnableStackSizeSection;
+
+  Options.MCOptions = InitMCTargetOptionsFromFlags();
+
+  Options.ThreadModel = TMModel;
+  Options.EABIVersion = EABIVersion;
+  Options.DebuggerTuning = DebuggerTuningOpt;
+
+  return Options;
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::string getCPUStr() {
+  // If the user asked for the 'native' CPU, autodetect it here. If
+  // autodetection fails, this will set the CPU to an empty string, which
+  // tells the target to pick a basic default.
+  if (MCPU == "native")
+    return sys::getHostCPUName();
+
+  return MCPU;
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::string getFeaturesStr() {
+  SubtargetFeatures Features;
+
+  // If the user asked for the 'native' CPU, we need to autodetect features.
+  // This is necessary for x86 where the CPU might not support all the
+  // features the autodetected CPU name lists in the target. For example,
+  // not all Sandybridge processors support AVX.
+  if (MCPU == "native") {
+    StringMap<bool> HostFeatures;
+    if (sys::getHostCPUFeatures(HostFeatures))
+      for (auto &F : HostFeatures)
+        Features.AddFeature(F.first(), F.second);
+  }
+
+  for (unsigned i = 0; i != MAttrs.size(); ++i)
+    Features.AddFeature(MAttrs[i]);
+
+  return Features.getString();
+}
+
+LLVM_ATTRIBUTE_UNUSED static std::vector<std::string> getFeatureList() {
+  SubtargetFeatures Features;
+
+  // If the user asked for the 'native' CPU, we need to autodetect features.
+  // This is necessary for x86 where the CPU might not support all the
+  // features the autodetected CPU name lists in the target. For example,
+  // not all Sandybridge processors support AVX.
+  if (MCPU == "native") {
+    StringMap<bool> HostFeatures;
+    if (sys::getHostCPUFeatures(HostFeatures))
+      for (auto &F : HostFeatures)
+        Features.AddFeature(F.first(), F.second);
+  }
+
+  for (unsigned i = 0; i != MAttrs.size(); ++i)
+    Features.AddFeature(MAttrs[i]);
+
+  return Features.getFeatures();
+}
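+
+// Typical use in an llc-like tool (a sketch; TheTarget, TheTriple, and OLvl
+// are assumed to come from the tool's own setup, and error handling is
+// omitted):
+//   TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
+//   std::string CPUStr = getCPUStr(), FeaturesStr = getFeaturesStr();
+//   TargetMachine *TM = TheTarget->createTargetMachine(
+//       TheTriple.getTriple(), CPUStr, FeaturesStr, Options, getRelocModel(),
+//       getCodeModel(), OLvl);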
+
+/// \brief Set function attributes of functions in Module M based on CPU,
+/// Features, and command line flags.
+LLVM_ATTRIBUTE_UNUSED static void
+setFunctionAttributes(StringRef CPU, StringRef Features, Module &M) {
+  for (auto &F : M) {
+    auto &Ctx = F.getContext();
+    AttributeList Attrs = F.getAttributes();
+    AttrBuilder NewAttrs;
+
+    if (!CPU.empty())
+      NewAttrs.addAttribute("target-cpu", CPU);
+    if (!Features.empty())
+      NewAttrs.addAttribute("target-features", Features);
+    if (DisableFPElim.getNumOccurrences() > 0)
+      NewAttrs.addAttribute("no-frame-pointer-elim",
+                            DisableFPElim ? "true" : "false");
+    if (DisableTailCalls.getNumOccurrences() > 0)
+      NewAttrs.addAttribute("disable-tail-calls",
+                            toStringRef(DisableTailCalls));
+    if (StackRealign)
+      NewAttrs.addAttribute("stackrealign");
+
+    if (TrapFuncName.getNumOccurrences() > 0)
+      for (auto &B : F)
+        for (auto &I : B)
+          if (auto *Call = dyn_cast<CallInst>(&I))
+            if (const auto *F = Call->getCalledFunction())
+              if (F->getIntrinsicID() == Intrinsic::debugtrap ||
+                  F->getIntrinsicID() == Intrinsic::trap)
+                Call->addAttribute(
+                    llvm::AttributeList::FunctionIndex,
+                    Attribute::get(Ctx, "trap-func-name", TrapFuncName));
+
+    // Let NewAttrs override Attrs.
+    F.setAttributes(
+        Attrs.addAttributes(Ctx, AttributeList::FunctionIndex, NewAttrs));
+  }
+}
diff --git a/linux-x64/clang/include/llvm/CodeGen/CostTable.h b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
new file mode 100644
index 0000000..0fc16d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/CostTable.h
@@ -0,0 +1,69 @@
+//===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Cost tables and simple lookup functions
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_COSTTABLE_H_
+#define LLVM_CODEGEN_COSTTABLE_H_
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/MachineValueType.h"
+
+namespace llvm {
+
+/// Cost Table Entry
+struct CostTblEntry {
+  int ISD;
+  MVT::SimpleValueType Type;
+  unsigned Cost;
+};
+
+/// Find an entry in the cost table that matches the given opcode (ISD) and
+/// type; returns nullptr if no entry matches.
+inline const CostTblEntry *CostTableLookup(ArrayRef<CostTblEntry> Tbl,
+                                           int ISD, MVT Ty) {
+  auto I = find_if(Tbl, [=](const CostTblEntry &Entry) {
+    return ISD == Entry.ISD && Ty == Entry.Type;
+  });
+  if (I != Tbl.end())
+    return I;
+
+  // Could not find an entry.
+  return nullptr;
+}
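+
+// Usage sketch (the table entry is illustrative, not from a real target):
+//   static const CostTblEntry Tbl[] = {
+//     { ISD::MUL, MVT::v4i32, 2 },
+//   };
+//   if (const auto *Entry = CostTableLookup(Tbl, ISD::MUL, MVT::v4i32))
+//     return Entry->Cost;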
+
+/// Type Conversion Cost Table
+struct TypeConversionCostTblEntry {
+  int ISD;
+  MVT::SimpleValueType Dst;
+  MVT::SimpleValueType Src;
+  unsigned Cost;
+};
+
+/// Find an entry in the type conversion cost table that matches the given
+/// opcode and destination/source types; returns nullptr if no entry matches.
+inline const TypeConversionCostTblEntry *
+ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntry> Tbl,
+                       int ISD, MVT Dst, MVT Src) {
+  auto I = find_if(Tbl, [=](const TypeConversionCostTblEntry &Entry) {
+    return ISD == Entry.ISD && Src == Entry.Src && Dst == Entry.Dst;
+  });
+  if (I != Tbl.end())
+    return I;
+
+  // Could not find an entry.
+  return nullptr;
+}
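+
+// Usage sketch (the table entry is illustrative):
+//   static const TypeConversionCostTblEntry Tbl[] = {
+//     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
+//   };
+//   if (const auto *Entry = ConvertCostTableLookup(Tbl, ISD::SIGN_EXTEND,
+//                                                  MVT::v4i64, MVT::v4i32))
+//     return Entry->Cost;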
+
+} // namespace llvm
+
+#endif /* LLVM_CODEGEN_COSTTABLE_H_ */
diff --git a/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h b/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h
new file mode 100644
index 0000000..8b59190
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DAGCombine.h
@@ -0,0 +1,25 @@
+//===-- llvm/CodeGen/DAGCombine.h  ------- SelectionDAG Nodes ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_CODEGEN_DAGCOMBINE_H
+#define LLVM_CODEGEN_DAGCOMBINE_H
+
+namespace llvm {
+
+enum CombineLevel {
+  BeforeLegalizeTypes,
+  AfterLegalizeTypes,
+  AfterLegalizeVectorOps,
+  AfterLegalizeDAG
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h b/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h
new file mode 100644
index 0000000..d3aabe2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DFAPacketizer.h
@@ -0,0 +1,222 @@
+//===- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
+#define LLVM_CODEGEN_DFAPACKETIZER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class DefaultVLIWScheduler;
+class InstrItineraryData;
+class MachineFunction;
+class MachineInstr;
+class MachineLoopInfo;
+class MCInstrDesc;
+class SUnit;
+class TargetInstrInfo;
+
+// --------------------------------------------------------------------
+// Definitions shared between DFAPacketizer.cpp and DFAPacketizerEmitter.cpp
+
+// DFA_MAX_RESTERMS * DFA_MAX_RESOURCES must fit within sizeof DFAInput.
+// This is verified in DFAPacketizer.cpp:DFAPacketizer::DFAPacketizer.
+//
+// e.g. terms x resource bit combinations that fit in uint32_t:
+//      4 terms x 8  bits = 32 bits
+//      3 terms x 10 bits = 30 bits
+//      2 terms x 16 bits = 32 bits
+//
+// e.g. terms x resource bit combinations that fit in uint64_t:
+//      8 terms x 8  bits = 64 bits
+//      7 terms x 9  bits = 63 bits
+//      6 terms x 10 bits = 60 bits
+//      5 terms x 12 bits = 60 bits
+//      4 terms x 16 bits = 64 bits <--- current
+//      3 terms x 21 bits = 63 bits
+//      2 terms x 32 bits = 64 bits
+//
+#define DFA_MAX_RESTERMS        4   // The max # of AND'ed resource terms.
+#define DFA_MAX_RESOURCES       16  // The max # of resource bits in one term.
+
+using DFAInput = uint64_t;
+using DFAStateInput = int64_t;
+
+#define DFA_TBLTYPE             "int64_t" // For generating DFAStateInputTable.
+// --------------------------------------------------------------------
+
+class DFAPacketizer {
+private:
+  using UnsignPair = std::pair<unsigned, DFAInput>;
+
+  const InstrItineraryData *InstrItins;
+  int CurrentState = 0;
+  const DFAStateInput (*DFAStateInputTable)[2];
+  const unsigned *DFAStateEntryTable;
+
+  // CachedTable is a map from <FromState, Input> to ToState.
+  DenseMap<UnsignPair, unsigned> CachedTable;
+
+  // Read the DFA transition table and update CachedTable.
+  void ReadTable(unsigned state);
+
+public:
+  DFAPacketizer(const InstrItineraryData *I, const DFAStateInput (*SIT)[2],
+                const unsigned *SET);
+
+  // Reset the current state to make all resources available.
+  void clearResources() {
+    CurrentState = 0;
+  }
+
+  // Return the DFAInput for an instruction class.
+  DFAInput getInsnInput(unsigned InsnClass);
+
+  // Return the DFAInput for an instruction class input vector.
+  static DFAInput getInsnInput(const std::vector<unsigned> &InsnClass);
+
+  // Check if the resources occupied by a MCInstrDesc are available in
+  // the current state.
+  bool canReserveResources(const MCInstrDesc *MID);
+
+  // Reserve the resources occupied by a MCInstrDesc and change the current
+  // state to reflect that change.
+  void reserveResources(const MCInstrDesc *MID);
+
+  // Check if the resources occupied by a machine instruction are available
+  // in the current state.
+  bool canReserveResources(MachineInstr &MI);
+
+  // Reserve the resources occupied by a machine instruction and change the
+  // current state to reflect that change.
+  void reserveResources(MachineInstr &MI);
+
+  const InstrItineraryData *getInstrItins() const { return InstrItins; }
+};
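+
+// Usage sketch (RT is assumed to be a DFAPacketizer created from the
+// target's generated DFA tables):
+//   if (RT->canReserveResources(MI))
+//     RT->reserveResources(MI);  // MI fits; track its functional units.
+//   else
+//     RT->clearResources();      // Start a new packet.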
+
+// VLIWPacketizerList implements a simple VLIW packetizer using a DFA. The
+// packetizer works on machine basic blocks. For each instruction I in BB,
+// the packetizer consults the DFA to see if machine resources are available
+// to execute I. If so, the packetizer checks if I depends on any instruction
+// in the current packet. If no dependency is found, I is added to the current
+// packet and the machine resources are marked as taken. If any dependency is
+// found, a target API call is made to prune the dependence.
+class VLIWPacketizerList {
+protected:
+  MachineFunction &MF;
+  const TargetInstrInfo *TII;
+  AliasAnalysis *AA;
+
+  // The VLIW Scheduler.
+  DefaultVLIWScheduler *VLIWScheduler;
+  // Vector of instructions assigned to the current packet.
+  std::vector<MachineInstr*> CurrentPacketMIs;
+  // DFA resource tracker.
+  DFAPacketizer *ResourceTracker;
+  // Map: MI -> SU.
+  std::map<MachineInstr*, SUnit*> MIToSUnit;
+
+public:
+  // The AliasAnalysis parameter can be nullptr.
+  VLIWPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+                     AliasAnalysis *AA);
+
+  virtual ~VLIWPacketizerList();
+
+  // Implement this API in the backend to bundle instructions.
+  void PacketizeMIs(MachineBasicBlock *MBB,
+                    MachineBasicBlock::iterator BeginItr,
+                    MachineBasicBlock::iterator EndItr);
+
+  // Return the ResourceTracker.
+  DFAPacketizer *getResourceTracker() { return ResourceTracker; }
+
+  // addToPacket - Add MI to the current packet.
+  virtual MachineBasicBlock::iterator addToPacket(MachineInstr &MI) {
+    CurrentPacketMIs.push_back(&MI);
+    ResourceTracker->reserveResources(MI);
+    return MI;
+  }
+
+  // End the current packet and reset the state of the packetizer.
+  // Overriding this function allows the target-specific packetizer
+  // to perform custom finalization.
+  virtual void endPacket(MachineBasicBlock *MBB,
+                         MachineBasicBlock::iterator MI);
+
+  // Perform initialization before packetizing an instruction. This
+  // function is supposed to be overridden by the target-dependent packetizer.
+  virtual void initPacketizerState() {}
+
+  // Check if the given instruction I should be ignored by the packetizer.
+  virtual bool ignorePseudoInstruction(const MachineInstr &I,
+                                       const MachineBasicBlock *MBB) {
+    return false;
+  }
+
+  // Return true if instruction MI cannot be packetized with any other
+  // instruction, which means that MI itself is a packet.
+  virtual bool isSoloInstruction(const MachineInstr &MI) { return true; }
+
+  // Check if the packetizer should try to add the given instruction to
+  // the current packet. One reason why it may not be desirable
+  // to include an instruction in the current packet is that it
+  // would cause a stall.
+  // If this function returns "false", the current packet will be ended,
+  // and the instruction will be added to the next packet.
+  virtual bool shouldAddToPacket(const MachineInstr &MI) { return true; }
+
+  // Check if it is legal to packetize SUI and SUJ together.
+  virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+  // Check if it is legal to prune a dependence between SUI and SUJ.
+  virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+    return false;
+  }
+
+  // Add a DAG mutation to be done before the packetization begins.
+  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation);
+
+  bool alias(const MachineInstr &MI1, const MachineInstr &MI2,
+             bool UseTBAA = true) const;
+
+private:
+  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2,
+             bool UseTBAA = true) const;
+};
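+
+// Sketch of a target packetizer built on this class (a hypothetical
+// "ExamplePacketizer"; the override bodies are illustrative policies only):
+//   class ExamplePacketizer : public VLIWPacketizerList {
+//   public:
+//     ExamplePacketizer(MachineFunction &MF, MachineLoopInfo &MLI)
+//         : VLIWPacketizerList(MF, MLI, /*AA=*/nullptr) {}
+//     bool isSoloInstruction(const MachineInstr &MI) override {
+//       return MI.isInlineAsm(); // e.g. never bundle inline asm.
+//     }
+//     bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
+//       return !SUJ->isSucc(SUI); // e.g. only if SUI doesn't depend on SUJ.
+//     }
+//   };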
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_DFAPACKETIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/DIE.h b/linux-x64/clang/include/llvm/CodeGen/DIE.h
new file mode 100644
index 0000000..f809fc9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DIE.h
@@ -0,0 +1,910 @@
+//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures for DWARF info entries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+#define LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class DIE;
+class DIEUnit;
+class MCExpr;
+class MCSection;
+class MCSymbol;
+class raw_ostream;
+
+//===--------------------------------------------------------------------===//
+/// Dwarf abbreviation data, describes one attribute of a Dwarf abbreviation.
+class DIEAbbrevData {
+  /// Dwarf attribute code.
+  dwarf::Attribute Attribute;
+
+  /// Dwarf form code.
+  dwarf::Form Form;
+
+  /// Dwarf attribute value for DW_FORM_implicit_const
+  int64_t Value = 0;
+
+public:
+  DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
+      : Attribute(A), Form(F) {}
+  DIEAbbrevData(dwarf::Attribute A, int64_t V)
+      : Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
+
+  /// Accessors.
+  /// @{
+  dwarf::Attribute getAttribute() const { return Attribute; }
+  dwarf::Form getForm() const { return Form; }
+  int64_t getValue() const { return Value; }
+  /// @}
+
+  /// Used to gather unique data for the abbreviation folding set.
+  void Profile(FoldingSetNodeID &ID) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Dwarf abbreviation, describes the organization of a debug information
+/// object.
+class DIEAbbrev : public FoldingSetNode {
+  /// Unique number for node.
+  unsigned Number;
+
+  /// Dwarf tag code.
+  dwarf::Tag Tag;
+
+  /// Whether or not this node has children.
+  ///
+  /// This cheats a bit in all of the uses since the values in the standard
+  /// are 0 and 1 for no children and children respectively.
+  bool Children;
+
+  /// Raw data bytes for abbreviation.
+  SmallVector<DIEAbbrevData, 12> Data;
+
+public:
+  DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C) {}
+
+  /// Accessors.
+  /// @{
+  dwarf::Tag getTag() const { return Tag; }
+  unsigned getNumber() const { return Number; }
+  bool hasChildren() const { return Children; }
+  const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
+  void setChildrenFlag(bool hasChild) { Children = hasChild; }
+  void setNumber(unsigned N) { Number = N; }
+  /// @}
+
+  /// Adds another set of attribute information to the abbreviation.
+  void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
+    Data.push_back(DIEAbbrevData(Attribute, Form));
+  }
+
+  /// Adds attribute with DW_FORM_implicit_const value
+  void AddImplicitConstAttribute(dwarf::Attribute Attribute, int64_t Value) {
+    Data.push_back(DIEAbbrevData(Attribute, Value));
+  }
+
+  /// Used to gather unique data for the abbreviation folding set.
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// Print the abbreviation using the specified asm printer.
+  void Emit(const AsmPrinter *AP) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Helps unique DIEAbbrev objects and assigns abbreviation numbers.
+///
+/// This class will unique the DIE abbreviations for a llvm::DIE object and
+/// assign a unique abbreviation number to each unique DIEAbbrev object it
+/// finds. The resulting collection of DIEAbbrev objects can then be emitted
+/// into the .debug_abbrev section.
+class DIEAbbrevSet {
+  /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
+  /// storage container.
+  BumpPtrAllocator &Alloc;
+  /// \brief FoldingSet that uniques the abbreviations.
+  FoldingSet<DIEAbbrev> AbbreviationsSet;
+  /// A list of all the unique abbreviations in use.
+  std::vector<DIEAbbrev *> Abbreviations;
+
+public:
+  DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
+  ~DIEAbbrevSet();
+
+  /// Generate the abbreviation declaration for a DIE and return a pointer to
+  /// the generated abbreviation.
+  ///
+  /// \param Die the debug info entry to generate the abbreviation for.
+  /// \returns A reference to the uniqued abbreviation declaration that is
+  /// owned by this class.
+  DIEAbbrev &uniqueAbbreviation(DIE &Die);
+
+  /// Print all abbreviations using the specified asm printer.
+  void Emit(const AsmPrinter *AP, MCSection *Section) const;
+};
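+
+// Usage sketch (Die is assumed to be some llvm::DIE being emitted):
+//   BumpPtrAllocator Alloc;
+//   DIEAbbrevSet Abbrevs(Alloc);
+//   DIEAbbrev &Abbrev = Abbrevs.uniqueAbbreviation(Die); // Numbered on first use.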
+
+//===--------------------------------------------------------------------===//
+/// An integer value DIE.
+///
+class DIEInteger {
+  uint64_t Integer;
+
+public:
+  explicit DIEInteger(uint64_t I) : Integer(I) {}
+
+  /// Choose the best form for integer.
+  static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
+    if (IsSigned) {
+      const int64_t SignedInt = Int;
+      if ((char)Int == SignedInt)
+        return dwarf::DW_FORM_data1;
+      if ((short)Int == SignedInt)
+        return dwarf::DW_FORM_data2;
+      if ((int)Int == SignedInt)
+        return dwarf::DW_FORM_data4;
+    } else {
+      if ((unsigned char)Int == Int)
+        return dwarf::DW_FORM_data1;
+      if ((unsigned short)Int == Int)
+        return dwarf::DW_FORM_data2;
+      if ((unsigned int)Int == Int)
+        return dwarf::DW_FORM_data4;
+    }
+    return dwarf::DW_FORM_data8;
+  }
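+
+  // e.g. BestForm(/*IsSigned=*/false, 0x100) yields DW_FORM_data2, and
+  // BestForm(/*IsSigned=*/true, 100) yields DW_FORM_data1.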
+
+  uint64_t getValue() const { return Integer; }
+  void setValue(uint64_t Val) { Integer = Val; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// An expression DIE.
+class DIEExpr {
+  const MCExpr *Expr;
+
+public:
+  explicit DIEExpr(const MCExpr *E) : Expr(E) {}
+
+  /// Get MCExpr.
+  const MCExpr *getValue() const { return Expr; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A label DIE.
+class DIELabel {
+  const MCSymbol *Label;
+
+public:
+  explicit DIELabel(const MCSymbol *L) : Label(L) {}
+
+  /// Get MCSymbol.
+  const MCSymbol *getValue() const { return Label; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A simple label difference DIE.
+///
+class DIEDelta {
+  const MCSymbol *LabelHi;
+  const MCSymbol *LabelLo;
+
+public:
+  DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A container for string pool string values.
+///
+/// This class is used with the DW_FORM_strp and DW_FORM_GNU_str_index forms.
+class DIEString {
+  DwarfStringPoolEntryRef S;
+
+public:
+  DIEString(DwarfStringPoolEntryRef S) : S(S) {}
+
+  /// Grab the string out of the object.
+  StringRef getString() const { return S.getString(); }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A container for inline string values.
+///
+/// This class is used with the DW_FORM_string form.
+class DIEInlineString {
+  StringRef S;
+
+public:
+  template <typename Allocator>
+  explicit DIEInlineString(StringRef Str, Allocator &A) : S(Str.copy(A)) {}
+
+  ~DIEInlineString() = default;
+
+  /// Grab the string out of the object.
+  StringRef getString() const { return S; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A pointer to another debug information entry.  An instance of this class can
+/// also be used as a proxy for a debug information entry not yet defined
+/// (i.e., types).
+class DIEEntry {
+  DIE *Entry;
+
+public:
+  DIEEntry() = delete;
+  explicit DIEEntry(DIE &E) : Entry(&E) {}
+
+  DIE &getEntry() const { return *Entry; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// Represents a pointer to a location list in the debug_loc
+/// section.
+class DIELocList {
+  /// Index into the .debug_loc vector.
+  size_t Index;
+
+public:
+  DIELocList(size_t I) : Index(I) {}
+
+  /// Grab the current index out.
+  size_t getValue() const { return Index; }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// A debug information entry value. Some of these roughly correlate
+/// to DWARF attribute classes.
+class DIEBlock;
+class DIELoc;
+class DIEValue {
+public:
+  enum Type {
+    isNone,
+#define HANDLE_DIEVALUE(T) is##T,
+#include "llvm/CodeGen/DIEValue.def"
+  };
+
+private:
+  /// Type of data stored in the value.
+  Type Ty = isNone;
+  dwarf::Attribute Attribute = (dwarf::Attribute)0;
+  dwarf::Form Form = (dwarf::Form)0;
+
+  /// Storage for the value.
+  ///
+  /// All values that aren't standard layout (or are larger than 8 bytes)
+  /// should be stored by reference instead of by value.
+  using ValTy = AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
+                                      DIEDelta *, DIEEntry, DIEBlock *,
+                                      DIELoc *, DIELocList>;
+
+  static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
+                    sizeof(ValTy) <= sizeof(void *),
+                "Expected all large types to be stored via pointer");
+
+  /// Underlying stored value.
+  ValTy Val;
+
+  template <class T> void construct(T V) {
+    static_assert(std::is_standard_layout<T>::value ||
+                      std::is_pointer<T>::value,
+                  "Expected standard layout or pointer");
+    new (reinterpret_cast<void *>(Val.buffer)) T(V);
+  }
+
+  template <class T> T *get() { return reinterpret_cast<T *>(Val.buffer); }
+  template <class T> const T *get() const {
+    return reinterpret_cast<const T *>(Val.buffer);
+  }
+  template <class T> void destruct() { get<T>()->~T(); }
+
+  /// Destroy the underlying value.
+  ///
+  /// This should get optimized down to a no-op.  We could skip it if we could
+  /// add a static assert on \a std::is_trivially_copyable(), but we currently
+  /// support versions of GCC that don't understand that.
+  void destroyVal() {
+    switch (Ty) {
+    case isNone:
+      return;
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  case is##T:                                                                  \
+    destruct<DIE##T>();                                                        \
+    return;
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  case is##T:                                                                  \
+    destruct<const DIE##T *>();                                                \
+    return;
+#include "llvm/CodeGen/DIEValue.def"
+    }
+  }
+
+  /// Copy the underlying value.
+  ///
+  /// This should get optimized down to a simple copy.  We need to actually
+  /// construct the value, rather than calling memcpy, to satisfy strict
+  /// aliasing rules.
+  void copyVal(const DIEValue &X) {
+    switch (Ty) {
+    case isNone:
+      return;
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  case is##T:                                                                  \
+    construct<DIE##T>(*X.get<DIE##T>());                                       \
+    return;
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  case is##T:                                                                  \
+    construct<const DIE##T *>(*X.get<const DIE##T *>());                       \
+    return;
+#include "llvm/CodeGen/DIEValue.def"
+    }
+  }
+
+public:
+  DIEValue() = default;
+
+  DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
+    copyVal(X);
+  }
+
+  DIEValue &operator=(const DIEValue &X) {
+    destroyVal();
+    Ty = X.Ty;
+    Attribute = X.Attribute;
+    Form = X.Form;
+    copyVal(X);
+    return *this;
+  }
+
+  ~DIEValue() { destroyVal(); }
+
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V)      \
+      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
+    construct<DIE##T>(V);                                                      \
+  }
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V)      \
+      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
+    assert(V && "Expected valid value");                                       \
+    construct<const DIE##T *>(V);                                              \
+  }
+#include "llvm/CodeGen/DIEValue.def"
+
+  /// Accessors.
+  /// @{
+  Type getType() const { return Ty; }
+  dwarf::Attribute getAttribute() const { return Attribute; }
+  dwarf::Form getForm() const { return Form; }
+  explicit operator bool() const { return Ty; }
+  /// @}
+
+#define HANDLE_DIEVALUE_SMALL(T)                                               \
+  const DIE##T &getDIE##T() const {                                            \
+    assert(getType() == is##T && "Expected " #T);                              \
+    return *get<DIE##T>();                                                     \
+  }
+#define HANDLE_DIEVALUE_LARGE(T)                                               \
+  const DIE##T &getDIE##T() const {                                            \
+    assert(getType() == is##T && "Expected " #T);                              \
+    return **get<const DIE##T *>();                                            \
+  }
+#include "llvm/CodeGen/DIEValue.def"
+
+  /// Emit value via the Dwarf writer.
+  void EmitValue(const AsmPrinter *AP) const;
+
+  /// Return the size of a value in bytes.
+  unsigned SizeOf(const AsmPrinter *AP) const;
+
+  void print(raw_ostream &O) const;
+  void dump() const;
+};
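+
+// Example (an illustrative sketch, not part of the original header): DIEValue
+// is a tagged union over the DIE* value classes above; small types live
+// in-line and large ones are stored by pointer. The attribute/form pairing
+// below is assumed for illustration.
+//
+//   DIEInteger Four(4);
+//   DIEValue V(dwarf::DW_AT_byte_size, dwarf::DW_FORM_data1, Four);
+//   if (V.getType() == DIEValue::isInteger)
+//     uint64_t Bytes = V.getDIEInteger().getValue();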
+
+struct IntrusiveBackListNode {
+  PointerIntPair<IntrusiveBackListNode *, 1> Next;
+
+  IntrusiveBackListNode() : Next(this, true) {}
+
+  IntrusiveBackListNode *getNext() const {
+    return Next.getInt() ? nullptr : Next.getPointer();
+  }
+};
+
+struct IntrusiveBackListBase {
+  using Node = IntrusiveBackListNode;
+
+  Node *Last = nullptr;
+
+  bool empty() const { return !Last; }
+
+  void push_back(Node &N) {
+    assert(N.Next.getPointer() == &N && "Expected unlinked node");
+    assert(N.Next.getInt() == true && "Expected unlinked node");
+
+    if (Last) {
+      N.Next = Last->Next;
+      Last->Next.setPointerAndInt(&N, false);
+    }
+    Last = &N;
+  }
+};
+
+template <class T> class IntrusiveBackList : IntrusiveBackListBase {
+public:
+  using IntrusiveBackListBase::empty;
+
+  void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
+  T &back() { return *static_cast<T *>(Last); }
+  const T &back() const { return *static_cast<T *>(Last); }
+
+  class const_iterator;
+  class iterator
+      : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
+    friend class const_iterator;
+
+    Node *N = nullptr;
+
+  public:
+    iterator() = default;
+    explicit iterator(T *N) : N(N) {}
+
+    iterator &operator++() {
+      N = N->getNext();
+      return *this;
+    }
+
+    explicit operator bool() const { return N; }
+    T &operator*() const { return *static_cast<T *>(N); }
+
+    bool operator==(const iterator &X) const { return N == X.N; }
+    bool operator!=(const iterator &X) const { return N != X.N; }
+  };
+
+  class const_iterator
+      : public iterator_facade_base<const_iterator, std::forward_iterator_tag,
+                                    const T> {
+    const Node *N = nullptr;
+
+  public:
+    const_iterator() = default;
+    // Placate MSVC by explicitly scoping 'iterator'.
+    const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
+    explicit const_iterator(const T *N) : N(N) {}
+
+    const_iterator &operator++() {
+      N = N->getNext();
+      return *this;
+    }
+
+    explicit operator bool() const { return N; }
+    const T &operator*() const { return *static_cast<const T *>(N); }
+
+    bool operator==(const const_iterator &X) const { return N == X.N; }
+    bool operator!=(const const_iterator &X) const { return N != X.N; }
+  };
+
+  iterator begin() {
+    return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
+  }
+  const_iterator begin() const {
+    return const_cast<IntrusiveBackList *>(this)->begin();
+  }
+  iterator end() { return iterator(); }
+  const_iterator end() const { return const_iterator(); }
+
+  static iterator toIterator(T &N) { return iterator(&N); }
+  static const_iterator toIterator(const T &N) { return const_iterator(&N); }
+};
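+
+// Example (an illustrative sketch; MyNode is a hypothetical client type): the
+// list links nodes intrusively through IntrusiveBackListNode and stores only
+// a pointer to the last node, yet iteration still runs in insertion order.
+//
+//   struct MyNode : IntrusiveBackListNode { int Data = 0; };
+//
+//   MyNode A, B;
+//   IntrusiveBackList<MyNode> List;
+//   List.push_back(A);
+//   List.push_back(B);
+//   for (MyNode &N : List) // visits A, then B
+//     (void)N.Data;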
+
+/// A list of DIE values.
+///
+/// This is a singly-linked list, but instead of reversing the order of
+/// insertion, we keep a pointer to the back of the list so we can push in
+/// order.
+///
+/// There are two main reasons to choose a linked list over a customized
+/// vector-like data structure.
+///
+///  1. For teardown efficiency, we want DIEs to be BumpPtrAllocated.  Using a
+///     linked list here makes this way easier to accomplish.
+///  2. Carrying an extra pointer per \a DIEValue isn't expensive.  45% of DIEs
+///     have 2 or fewer values, and 90% have 5 or fewer.  A vector would be
+///     over-allocated by 50% on average anyway, the same cost as the
+///     linked-list node.
+class DIEValueList {
+  struct Node : IntrusiveBackListNode {
+    DIEValue V;
+
+    explicit Node(DIEValue V) : V(V) {}
+  };
+
+  using ListTy = IntrusiveBackList<Node>;
+
+  ListTy List;
+
+public:
+  class const_value_iterator;
+  class value_iterator
+      : public iterator_adaptor_base<value_iterator, ListTy::iterator,
+                                     std::forward_iterator_tag, DIEValue> {
+    friend class const_value_iterator;
+
+    using iterator_adaptor =
+        iterator_adaptor_base<value_iterator, ListTy::iterator,
+                              std::forward_iterator_tag, DIEValue>;
+
+  public:
+    value_iterator() = default;
+    explicit value_iterator(ListTy::iterator X) : iterator_adaptor(X) {}
+
+    explicit operator bool() const { return bool(wrapped()); }
+    DIEValue &operator*() const { return wrapped()->V; }
+  };
+
+  class const_value_iterator : public iterator_adaptor_base<
+                                   const_value_iterator, ListTy::const_iterator,
+                                   std::forward_iterator_tag, const DIEValue> {
+    using iterator_adaptor =
+        iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
+                              std::forward_iterator_tag, const DIEValue>;
+
+  public:
+    const_value_iterator() = default;
+    const_value_iterator(DIEValueList::value_iterator X)
+        : iterator_adaptor(X.wrapped()) {}
+    explicit const_value_iterator(ListTy::const_iterator X)
+        : iterator_adaptor(X) {}
+
+    explicit operator bool() const { return bool(wrapped()); }
+    const DIEValue &operator*() const { return wrapped()->V; }
+  };
+
+  using value_range = iterator_range<value_iterator>;
+  using const_value_range = iterator_range<const_value_iterator>;
+
+  value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
+    List.push_back(*new (Alloc) Node(V));
+    return value_iterator(ListTy::toIterator(List.back()));
+  }
+  template <class T>
+  value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
+                    dwarf::Form Form, T &&Value) {
+    return addValue(Alloc, DIEValue(Attribute, Form, std::forward<T>(Value)));
+  }
+
+  value_range values() {
+    return make_range(value_iterator(List.begin()), value_iterator(List.end()));
+  }
+  const_value_range values() const {
+    return make_range(const_value_iterator(List.begin()),
+                      const_value_iterator(List.end()));
+  }
+};
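+
+// Example (an illustrative sketch; Alloc is an assumed BumpPtrAllocator and
+// PoolRef an assumed DwarfStringPoolEntryRef): values are attached as
+// attribute/form/value triples, and the list nodes are bump-allocated, so
+// they are never freed individually.
+//
+//   DIEValueList Vals;
+//   Vals.addValue(Alloc, dwarf::DW_AT_name, dwarf::DW_FORM_strp,
+//                 DIEString(PoolRef));
+//   for (DIEValue V : Vals.values())
+//     (void)V.getForm();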
+
+//===--------------------------------------------------------------------===//
+/// A structured debug information entry.  Has an abbreviation which
+/// describes its organization.
+class DIE : IntrusiveBackListNode, public DIEValueList {
+  friend class IntrusiveBackList<DIE>;
+  friend class DIEUnit;
+
+  /// Dwarf unit relative offset.
+  unsigned Offset = 0;
+  /// Size of instance + children.
+  unsigned Size = 0;
+  unsigned AbbrevNumber = ~0u;
+  /// Dwarf tag code.
+  dwarf::Tag Tag = (dwarf::Tag)0;
+  /// Set to true to force a DIE to emit an abbreviation that says it has
+  /// children even when it doesn't. This is used for unit testing purposes.
+  bool ForceChildren = false;
+  /// Children DIEs.
+  IntrusiveBackList<DIE> Children;
+
+  /// The owner is either the parent DIE for children of other DIEs, or a
+  /// DIEUnit which contains this DIE as its unit DIE.
+  PointerUnion<DIE *, DIEUnit *> Owner;
+
+  explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}
+
+public:
+  DIE() = delete;
+  DIE(const DIE &RHS) = delete;
+  DIE(DIE &&RHS) = delete;
+  DIE &operator=(const DIE &RHS) = delete;
+  DIE &operator=(const DIE &&RHS) = delete;
+
+  static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
+    return new (Alloc) DIE(Tag);
+  }
+
+  // Accessors.
+  unsigned getAbbrevNumber() const { return AbbrevNumber; }
+  dwarf::Tag getTag() const { return Tag; }
+  /// Get the compile/type unit relative offset of this DIE.
+  unsigned getOffset() const { return Offset; }
+  unsigned getSize() const { return Size; }
+  bool hasChildren() const { return ForceChildren || !Children.empty(); }
+  void setForceChildren(bool B) { ForceChildren = B; }
+
+  using child_iterator = IntrusiveBackList<DIE>::iterator;
+  using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
+  using child_range = iterator_range<child_iterator>;
+  using const_child_range = iterator_range<const_child_iterator>;
+
+  child_range children() {
+    return make_range(Children.begin(), Children.end());
+  }
+  const_child_range children() const {
+    return make_range(Children.begin(), Children.end());
+  }
+
+  DIE *getParent() const;
+
+  /// Generate the abbreviation for this DIE.
+  ///
+  /// Calculate the abbreviation for this, which should be uniqued and
+  /// eventually used to call \a setAbbrevNumber().
+  DIEAbbrev generateAbbrev() const;
+
+  /// Set the abbreviation number for this DIE.
+  void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }
+
+  /// Get the absolute offset within the .debug_info or .debug_types section
+  /// for this DIE.
+  unsigned getDebugSectionOffset() const;
+
+  /// Compute the offset of this DIE and all its children.
+  ///
+  /// This function gets called just before we are going to generate the debug
+  /// information and gives each DIE a chance to figure out its CU relative DIE
+  /// offset, unique its abbreviation and fill in the abbreviation code, and
+  /// return the unit offset that points to where the next DIE will be emitted
+  /// within the debug unit section. After this function has been called for all
+  /// DIE objects, the DWARF can be generated, since all DIEs will have
+  /// calculated their offsets and can properly refer to one another.
+  ///
+  /// \param AP AsmPrinter to use when calculating sizes.
+  /// \param AbbrevSet the abbreviation used to unique DIE abbreviations.
+  /// \param CUOffset the compile/type unit relative offset in bytes.
+  /// \returns the offset for the DIE that follows this DIE within the
+  /// current compile/type unit.
+  unsigned computeOffsetsAndAbbrevs(const AsmPrinter *AP,
+                                    DIEAbbrevSet &AbbrevSet, unsigned CUOffset);
+
+  /// Climb up the parent chain to get the compile unit or type unit DIE that
+  /// this DIE belongs to.
+  ///
+  /// \returns the compile or type unit DIE that owns this DIE, or NULL if
+  /// this DIE hasn't been added to a unit DIE.
+  const DIE *getUnitDie() const;
+
+  /// Climb up the parent chain to get the compile unit or type unit that this
+  /// DIE belongs to.
+  ///
+  /// \returns the DIEUnit that represents the compile or type unit that owns
+  /// this DIE, or NULL if this DIE hasn't been added to a unit DIE.
+  const DIEUnit *getUnit() const;
+
+  void setOffset(unsigned O) { Offset = O; }
+  void setSize(unsigned S) { Size = S; }
+
+  /// Add a child to the DIE.
+  DIE &addChild(DIE *Child) {
+    assert(!Child->getParent() && "Child should be orphaned");
+    Child->Owner = this;
+    Children.push_back(*Child);
+    return Children.back();
+  }
+
+  /// Find a value in the DIE with the attribute given.
+  ///
+  /// Returns a default-constructed DIEValue (where \a DIEValue::getType()
+  /// gives \a DIEValue::isNone) if no such attribute exists.
+  DIEValue findAttribute(dwarf::Attribute Attribute) const;
+
+  void print(raw_ostream &O, unsigned IndentCount = 0) const;
+  void dump() const;
+};
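+
+// Example (an illustrative sketch; the tag and attribute choices are just one
+// plausible combination): DIEs are created through DIE::get() on a
+// BumpPtrAllocator and linked into a tree with addChild().
+//
+//   BumpPtrAllocator Alloc;
+//   DIE *SP = DIE::get(Alloc, dwarf::DW_TAG_subprogram);
+//   DIE &Arg = SP->addChild(DIE::get(Alloc, dwarf::DW_TAG_formal_parameter));
+//   Arg.addValue(Alloc, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag_present,
+//                DIEInteger(1));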
+
+//===--------------------------------------------------------------------===//
+/// Represents a compile or type unit.
+class DIEUnit {
+  /// The compile unit or type unit DIE. This variable must be an instance of
+  /// DIE so that we can calculate the DIEUnit from any DIE by traversing the
+  /// parent backchain and getting the Unit DIE, and then casting itself to a
+  /// DIEUnit. This allows us to be able to find the DIEUnit for any DIE without
+  /// having to store a pointer to the DIEUnit in each DIE instance.
+  DIE Die;
+  /// The section this unit will be emitted in. This may or may not be set to
+  /// a valid section depending on the client that is emitting DWARF.
+  MCSection *Section;
+  uint64_t Offset;        ///< .debug_info or .debug_types absolute section offset.
+  uint32_t Length;        ///< The length in bytes of all of the DIEs in this unit.
+  const uint16_t Version; ///< The Dwarf version number for this unit.
+  const uint8_t AddrSize; ///< The size in bytes of an address for this unit.
+protected:
+  ~DIEUnit() = default;
+
+public:
+  DIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag);
+  DIEUnit(const DIEUnit &RHS) = delete;
+  DIEUnit(DIEUnit &&RHS) = delete;
+  void operator=(const DIEUnit &RHS) = delete;
+  void operator=(const DIEUnit &&RHS) = delete;
+  /// Set the section that this DIEUnit will be emitted into.
+  ///
+  /// This function is used by some clients to set the section. Not all clients
+  /// that emit DWARF use this section variable.
+  void setSection(MCSection *Section) {
+    assert(!this->Section);
+    this->Section = Section;
+  }
+
+  virtual const MCSymbol *getCrossSectionRelativeBaseAddress() const {
+    return nullptr;
+  }
+
+  /// Return the section that this DIEUnit will be emitted into.
+  ///
+  /// \returns Section pointer which can be NULL.
+  MCSection *getSection() const { return Section; }
+  void setDebugSectionOffset(unsigned O) { Offset = O; }
+  unsigned getDebugSectionOffset() const { return Offset; }
+  void setLength(uint64_t L) { Length = L; }
+  uint64_t getLength() const { return Length; }
+  uint16_t getDwarfVersion() const { return Version; }
+  uint16_t getAddressSize() const { return AddrSize; }
+  DIE &getUnitDie() { return Die; }
+  const DIE &getUnitDie() const { return Die; }
+};
+
+struct BasicDIEUnit final : DIEUnit {
+  BasicDIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag)
+      : DIEUnit(Version, AddrSize, UnitTag) {}
+};
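+
+// Example (an illustrative sketch): a unit owns its unit DIE, and any DIE
+// parented below it can recover the unit again through DIE::getUnit().
+//
+//   BasicDIEUnit CU(/*Version=*/4, /*AddrSize=*/8, dwarf::DW_TAG_compile_unit);
+//   DIE &CUDie = CU.getUnitDie();
+//   assert(CUDie.getUnit() == &CU);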
+
+//===--------------------------------------------------------------------===//
+/// DIELoc - Represents an expression location.
+//
+class DIELoc : public DIEValueList {
+  mutable unsigned Size = 0; // Size in bytes excluding size header.
+
+public:
+  DIELoc() = default;
+
+  /// ComputeSize - Calculate the size of the location expression.
+  ///
+  unsigned ComputeSize(const AsmPrinter *AP) const;
+
+  /// BestForm - Choose the best form for data.
+  ///
+  dwarf::Form BestForm(unsigned DwarfVersion) const {
+    if (DwarfVersion > 3)
+      return dwarf::DW_FORM_exprloc;
+    // Pre-DWARF4 location expressions were blocks and not exprloc.
+    if ((unsigned char)Size == Size)
+      return dwarf::DW_FORM_block1;
+    if ((unsigned short)Size == Size)
+      return dwarf::DW_FORM_block2;
+    if ((unsigned int)Size == Size)
+      return dwarf::DW_FORM_block4;
+    return dwarf::DW_FORM_block;
+  }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
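+
+// Example (an illustrative sketch; AP is an assumed AsmPrinter*): the size
+// must be computed before BestForm can pick the smallest encoding.
+//
+//   DIELoc Loc; // ...populated with DW_OP_* values elsewhere
+//   (void)Loc.ComputeSize(AP);        // caches Size
+//   dwarf::Form F4 = Loc.BestForm(4); // DW_FORM_exprloc on DWARF v4+
+//   dwarf::Form F2 = Loc.BestForm(2); // smallest fitting block form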
+
+//===--------------------------------------------------------------------===//
+/// DIEBlock - Represents a block of values.
+//
+class DIEBlock : public DIEValueList {
+  mutable unsigned Size = 0; // Size in bytes excluding size header.
+
+public:
+  DIEBlock() = default;
+
+  /// ComputeSize - Calculate the size of the location expression.
+  ///
+  unsigned ComputeSize(const AsmPrinter *AP) const;
+
+  /// BestForm - Choose the best form for data.
+  ///
+  dwarf::Form BestForm() const {
+    if ((unsigned char)Size == Size)
+      return dwarf::DW_FORM_block1;
+    if ((unsigned short)Size == Size)
+      return dwarf::DW_FORM_block2;
+    if ((unsigned int)Size == Size)
+      return dwarf::DW_FORM_block4;
+    return dwarf::DW_FORM_block;
+  }
+
+  void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+  unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+  void print(raw_ostream &O) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/DIEValue.def b/linux-x64/clang/include/llvm/CodeGen/DIEValue.def
new file mode 100644
index 0000000..a3fce9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DIEValue.def
@@ -0,0 +1,47 @@
+//===- llvm/CodeGen/DIEValue.def - DIEValue types ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through all types of DIEValue.
+//
+//===----------------------------------------------------------------------===//
+
+#if !(defined HANDLE_DIEVALUE || defined HANDLE_DIEVALUE_SMALL ||              \
+      defined HANDLE_DIEVALUE_LARGE)
+#error "Missing macro definition of HANDLE_DIEVALUE"
+#endif
+
+// Handler for all values.
+#ifndef HANDLE_DIEVALUE
+#define HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for small values.
+#ifndef HANDLE_DIEVALUE_SMALL
+#define HANDLE_DIEVALUE_SMALL(T) HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for large values.
+#ifndef HANDLE_DIEVALUE_LARGE
+#define HANDLE_DIEVALUE_LARGE(T) HANDLE_DIEVALUE(T)
+#endif
+
+HANDLE_DIEVALUE_SMALL(Integer)
+HANDLE_DIEVALUE_SMALL(String)
+HANDLE_DIEVALUE_SMALL(Expr)
+HANDLE_DIEVALUE_SMALL(Label)
+HANDLE_DIEVALUE_LARGE(Delta)
+HANDLE_DIEVALUE_SMALL(Entry)
+HANDLE_DIEVALUE_LARGE(Block)
+HANDLE_DIEVALUE_LARGE(Loc)
+HANDLE_DIEVALUE_SMALL(LocList)
+HANDLE_DIEVALUE_LARGE(InlineString)
+
+#undef HANDLE_DIEVALUE
+#undef HANDLE_DIEVALUE_SMALL
+#undef HANDLE_DIEVALUE_LARGE
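+
+// Example use (an illustrative sketch, mirroring how DIEValue consumes this
+// file): a client defines one or more handler macros and then includes this
+// file to stamp out one case per value type. The macros are #undef'd above,
+// so the file can be included again later.
+//
+//   #define HANDLE_DIEVALUE(T) case DIEValue::is##T: return "DIE" #T;
+//   #include "llvm/CodeGen/DIEValue.def"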
diff --git a/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
new file mode 100644
index 0000000..e6c0483
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -0,0 +1,53 @@
+//===- llvm/CodeGen/DwarfStringPoolEntry.h - String pool entry --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// Data for a string pool entry.
+struct DwarfStringPoolEntry {
+  MCSymbol *Symbol;
+  unsigned Offset;
+  unsigned Index;
+};
+
+/// String pool entry reference.
+struct DwarfStringPoolEntryRef {
+  const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
+
+public:
+  DwarfStringPoolEntryRef() = default;
+  explicit DwarfStringPoolEntryRef(
+      const StringMapEntry<DwarfStringPoolEntry> &I)
+      : I(&I) {}
+
+  explicit operator bool() const { return I; }
+  MCSymbol *getSymbol() const {
+    assert(I->second.Symbol && "No symbol available!");
+    return I->second.Symbol;
+  }
+  unsigned getOffset() const { return I->second.Offset; }
+  unsigned getIndex() const { return I->second.Index; }
+  StringRef getString() const { return I->first(); }
+  /// Return the entire string pool entry for convenience.
+  DwarfStringPoolEntry getEntry() const { return I->getValue(); }
+
+  bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
+  bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h b/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h
new file mode 100644
index 0000000..c31fad2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/EdgeBundles.h
@@ -0,0 +1,64 @@
+//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
+// edges leaving a machine basic block are in the same bundle, and all edges
+// entering a machine basic block are in the same bundle.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
+#define LLVM_CODEGEN_EDGEBUNDLES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class EdgeBundles : public MachineFunctionPass {
+  const MachineFunction *MF;
+
+  /// EC - Each edge bundle is an equivalence class. The keys are:
+  ///   2*BB->getNumber()   -> Ingoing bundle.
+  ///   2*BB->getNumber()+1 -> Outgoing bundle.
+  IntEqClasses EC;
+
+  /// Blocks - Map each bundle to a list of basic block numbers.
+  SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
+
+public:
+  static char ID;
+  EdgeBundles() : MachineFunctionPass(ID) {}
+
+  /// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
+  /// bundle number for basic block #N.
+  unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
+
+  /// getNumBundles - Return the total number of bundles in the CFG.
+  unsigned getNumBundles() const { return EC.getNumClasses(); }
+
+  /// getBlocks - Return an array of blocks that are connected to Bundle.
+  ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
+
+  /// getMachineFunction - Return the last machine function computed.
+  const MachineFunction *getMachineFunction() const { return MF; }
+
+  /// view - Visualize the annotated bipartite CFG with Graphviz.
+  void view() const;
+
+private:
+  bool runOnMachineFunction(MachineFunction&) override;
+  void getAnalysisUsage(AnalysisUsage&) const override;
+};
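+
+// Example (an illustrative sketch; Bundles is an assumed EdgeBundles* obtained
+// from getAnalysis<EdgeBundles>() and MBB a MachineBasicBlock*): bundle ids
+// follow the 2*N / 2*N+1 keying documented above.
+//
+//   unsigned In  = Bundles->getBundle(MBB->getNumber(), /*Out=*/false);
+//   unsigned Out = Bundles->getBundle(MBB->getNumber(), /*Out=*/true);
+//   for (unsigned BB : Bundles->getBlocks(Out))
+//     (void)BB; // block numbers connected to the outgoing bundle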
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h b/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h
new file mode 100644
index 0000000..338c214
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ExecutionDomainFix.h
@@ -0,0 +1,213 @@
+//==-- llvm/CodeGen/ExecutionDomainFix.h - Execution Domain Fix -*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Execution Domain Fix pass.
+///
+/// Some X86 SSE instructions like mov, and, or, xor are available in different
+/// variants for different operand types. These variant instructions are
+/// equivalent, but on Nehalem and newer CPUs there is extra latency
+/// transferring data between integer and floating point domains.  ARM cores
+/// have similar issues when they are configured with both VFP and NEON
+/// pipelines.
+///
+/// This pass changes the variant instructions to minimize domain crossings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
+#define LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LoopTraversal.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/ReachingDefAnalysis.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+class TargetInstrInfo;
+
+/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
+/// of execution domains.
+///
+/// An open DomainValue represents a set of instructions that can still switch
+/// execution domain. Multiple registers may refer to the same open
+/// DomainValue - they will eventually be collapsed to the same execution
+/// domain.
+///
+/// A collapsed DomainValue represents a single register that has been forced
+/// into one or more execution domains. There is a separate collapsed
+/// DomainValue for each register, but it may contain multiple execution
+/// domains. A register value is initially created in a single execution
+/// domain, but if we were forced to pay the penalty of a domain crossing, we
+/// keep track of the fact that the register is now available in multiple
+/// domains.
+struct DomainValue {
+  /// Basic reference counting.
+  unsigned Refs = 0;
+
+  /// Bitmask of available domains. For an open DomainValue, it is the still
+  /// possible domains for collapsing. For a collapsed DomainValue it is the
+  /// domains where the register is available for free.
+  unsigned AvailableDomains;
+
+  /// Pointer to the next DomainValue in a chain.  When two DomainValues are
+  /// merged, Victim.Next is set to point to Victor, so old DomainValue
+  /// references can be updated by following the chain.
+  DomainValue *Next;
+
+  /// Twiddleable instructions using or defining these registers.
+  SmallVector<MachineInstr *, 8> Instrs;
+
+  DomainValue() { clear(); }
+
+  /// A collapsed DomainValue has no instructions to twiddle - it simply keeps
+  /// track of the domains where the registers are already available.
+  bool isCollapsed() const { return Instrs.empty(); }
+
+  /// Is domain available?
+  bool hasDomain(unsigned domain) const {
+    assert(domain <
+               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
+           "undefined behavior");
+    return AvailableDomains & (1u << domain);
+  }
+
+  /// Mark domain as available.
+  void addDomain(unsigned domain) { AvailableDomains |= 1u << domain; }
+
+  // Restrict to a single domain available.
+  void setSingleDomain(unsigned domain) { AvailableDomains = 1u << domain; }
+
+  /// Return bitmask of domains that are available and in mask.
+  unsigned getCommonDomains(unsigned mask) const {
+    return AvailableDomains & mask;
+  }
+
+  /// First domain available.
+  unsigned getFirstDomain() const {
+    return countTrailingZeros(AvailableDomains);
+  }
+
+  /// Clear this DomainValue and point to next which has all its data.
+  void clear() {
+    AvailableDomains = 0;
+    Next = nullptr;
+    Instrs.clear();
+  }
+};
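+
+// Example (an illustrative sketch; treating domain 0 as integer and domain 1
+// as floating point is an assumption made only for illustration):
+//
+//   DomainValue DV;
+//   DV.addDomain(0);                      // available in the integer domain
+//   DV.addDomain(1);                      // ...and in the FP domain
+//   unsigned Common = DV.getCommonDomains(1u << 1); // == (1u << 1)
+//   unsigned First = DV.getFirstDomain(); // == 0, the lowest set bit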
+
+class ExecutionDomainFix : public MachineFunctionPass {
+  SpecificBumpPtrAllocator<DomainValue> Allocator;
+  SmallVector<DomainValue *, 16> Avail;
+
+  const TargetRegisterClass *const RC;
+  MachineFunction *MF;
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  std::vector<SmallVector<int, 1>> AliasMap;
+  const unsigned NumRegs;
+  /// Value currently in each register, or NULL when no value is being tracked.
+  /// This counts as a DomainValue reference.
+  using LiveRegsDVInfo = std::vector<DomainValue *>;
+  LiveRegsDVInfo LiveRegs;
+  /// Keeps domain information for all registers. Note that this
+  /// is different from the usual notion of liveness. The CPU
+  /// doesn't care whether or not we consider a register killed.
+  using OutRegsInfoMap = SmallVector<LiveRegsDVInfo, 4>;
+  OutRegsInfoMap MBBOutRegsInfos;
+
+  ReachingDefAnalysis *RDA;
+
+public:
+  ExecutionDomainFix(char &PassID, const TargetRegisterClass &RC)
+      : MachineFunctionPass(PassID), RC(&RC), NumRegs(RC.getNumRegs()) {}
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<ReachingDefAnalysis>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+private:
+  /// Translate TRI register number to a list of indices into our smaller tables
+  /// of interesting registers.
+  iterator_range<SmallVectorImpl<int>::const_iterator>
+  regIndices(unsigned Reg) const;
+
+  /// DomainValue allocation.
+  DomainValue *alloc(int domain = -1);
+
+  /// Add reference to DV.
+  DomainValue *retain(DomainValue *DV) {
+    if (DV)
+      ++DV->Refs;
+    return DV;
+  }
+
+  /// Release a reference to DV.  When the last reference is released,
+  /// collapse if needed.
+  void release(DomainValue *);
+
+  /// Follow the chain of dead DomainValues until a live DomainValue is reached.
+  /// Update the referenced pointer when necessary.
+  DomainValue *resolve(DomainValue *&);
+
+  /// Set LiveRegs[rx] = dv, updating reference counts.
+  void setLiveReg(int rx, DomainValue *DV);
+
+  /// Kill register rx, recycle or collapse any DomainValue.
+  void kill(int rx);
+
+  /// Force register rx into domain.
+  void force(int rx, unsigned domain);
+
+  /// Collapse open DomainValue into given domain. If there are multiple
+  /// registers using dv, they each get a unique collapsed DomainValue.
+  void collapse(DomainValue *dv, unsigned domain);
+
+  /// All instructions and registers in B are moved to A, and B is released.
+  bool merge(DomainValue *A, DomainValue *B);
+
+  /// Set up LiveRegs by merging predecessor live-out values.
+  void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update live-out values.
+  void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Process the given basic block.
+  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Visit the given instruction.
+  bool visitInstr(MachineInstr *);
+
+  /// Update def-ages for registers defined by MI.
+  /// If Kill is set, also kill off DomainValues clobbered by the defs.
+  void processDefs(MachineInstr *, bool Kill);
+
+  /// A soft instruction can be changed to work in other domains given by mask.
+  void visitSoftInstr(MachineInstr *, unsigned mask);
+
+  /// A hard instruction only works in one domain. All input registers will be
+  /// forced into that domain.
+  void visitHardInstr(MachineInstr *, unsigned domain);
+};
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h b/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h
new file mode 100644
index 0000000..c6aaaad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ExpandReductions.h
@@ -0,0 +1,24 @@
+//===----- ExpandReductions.h - Expand experimental reduction intrinsics --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXPANDREDUCTIONS_H
+#define LLVM_CODEGEN_EXPANDREDUCTIONS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class ExpandReductionsPass
+    : public PassInfoMixin<ExpandReductionsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
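+
+// Example (an illustrative sketch using the new pass manager; F and FAM are
+// assumed to be a Function and a configured FunctionAnalysisManager): the
+// pass is function-scoped, so it plugs into a FunctionPassManager.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(ExpandReductionsPass());
+//   PreservedAnalyses PA = FPM.run(F, FAM);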
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_EXPANDREDUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FastISel.h b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
new file mode 100644
index 0000000..772bd6c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FastISel.h
@@ -0,0 +1,593 @@
+//===- FastISel.h - Definition of the FastISel class ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the FastISel class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FASTISEL_H
+#define LLVM_CODEGEN_FASTISEL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/MachineValueType.h"
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class Constant;
+class ConstantFP;
+class DataLayout;
+class FunctionLoweringInfo;
+class LoadInst;
+class MachineConstantPool;
+class MachineFrameInfo;
+class MachineFunction;
+class MachineInstr;
+class MachineMemOperand;
+class MachineOperand;
+class MachineRegisterInfo;
+class MCContext;
+class MCInstrDesc;
+class MCSymbol;
+class TargetInstrInfo;
+class TargetLibraryInfo;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class Type;
+class User;
+class Value;
+
+/// \brief This is a fast-path instruction selection class that generates poor
+/// code and doesn't support illegal types or non-trivial lowering, but runs
+/// quickly.
+class FastISel {
+public:
+  using ArgListEntry = TargetLoweringBase::ArgListEntry;
+  using ArgListTy = TargetLoweringBase::ArgListTy;
+  struct CallLoweringInfo {
+    Type *RetTy = nullptr;
+    bool RetSExt : 1;
+    bool RetZExt : 1;
+    bool IsVarArg : 1;
+    bool IsInReg : 1;
+    bool DoesNotReturn : 1;
+    bool IsReturnValueUsed : 1;
+    bool IsPatchPoint : 1;
+
+    // \brief IsTailCall should be modified by implementations of fastLowerCall
+    // that perform tail call conversions.
+    bool IsTailCall = false;
+
+    unsigned NumFixedArgs = -1;
+    CallingConv::ID CallConv = CallingConv::C;
+    const Value *Callee = nullptr;
+    MCSymbol *Symbol = nullptr;
+    ArgListTy Args;
+    ImmutableCallSite *CS = nullptr;
+    MachineInstr *Call = nullptr;
+    unsigned ResultReg = 0;
+    unsigned NumResultRegs = 0;
+
+    SmallVector<Value *, 16> OutVals;
+    SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
+    SmallVector<unsigned, 16> OutRegs;
+    SmallVector<ISD::InputArg, 4> Ins;
+    SmallVector<unsigned, 4> InRegs;
+
+    CallLoweringInfo()
+        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
+          DoesNotReturn(false), IsReturnValueUsed(true), IsPatchPoint(false) {}
+
+    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+                                const Value *Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite &Call) {
+      RetTy = ResultTy;
+      Callee = Target;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn = Call.doesNotReturn();
+      IsVarArg = FuncTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      CallConv = Call.getCallingConv();
+      Args = std::move(ArgsList);
+      NumFixedArgs = FuncTy->getNumParams();
+
+      CS = &Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+                                MCSymbol *Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite &Call,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Callee = Call.getCalledValue();
+      Symbol = Target;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn = Call.doesNotReturn();
+      IsVarArg = FuncTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      CallConv = Call.getCallingConv();
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;
+
+      CS = &Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+                                const Value *Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Callee = Target;
+      CallConv = CC;
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
+                                CallingConv::ID CC, Type *ResultTy,
+                                StringRef Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U);
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+                                MCSymbol *Target, ArgListTy &&ArgsList,
+                                unsigned FixedArgs = ~0U) {
+      RetTy = ResultTy;
+      Symbol = Target;
+      CallConv = CC;
+      Args = std::move(ArgsList);
+      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+      return *this;
+    }
+
+    CallLoweringInfo &setTailCall(bool Value = true) {
+      IsTailCall = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+      IsPatchPoint = Value;
+      return *this;
+    }
+
+    ArgListTy &getArgs() { return Args; }
+
+    void clearOuts() {
+      OutVals.clear();
+      OutFlags.clear();
+      OutRegs.clear();
+    }
+
+    void clearIns() {
+      Ins.clear();
+      InRegs.clear();
+    }
+  };
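+
+  // Example (an illustrative sketch, as used from inside a FastISel subclass;
+  // CC, RetTy, Fn and ArgList are assumed to be in scope): the setters return
+  // *this, so a call can be described in one chained expression before
+  // lowering it.
+  //
+  //   CallLoweringInfo CLI;
+  //   CLI.setCallee(CC, RetTy, Fn, std::move(ArgList)).setTailCall();
+  //   bool Lowered = fastLowerCall(CLI);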
+
+protected:
+  DenseMap<const Value *, unsigned> LocalValueMap;
+  FunctionLoweringInfo &FuncInfo;
+  MachineFunction *MF;
+  MachineRegisterInfo &MRI;
+  MachineFrameInfo &MFI;
+  MachineConstantPool &MCP;
+  DebugLoc DbgLoc;
+  const TargetMachine &TM;
+  const DataLayout &DL;
+  const TargetInstrInfo &TII;
+  const TargetLowering &TLI;
+  const TargetRegisterInfo &TRI;
+  const TargetLibraryInfo *LibInfo;
+  bool SkipTargetIndependentISel;
+
+  /// \brief The position of the last instruction for materializing constants
+  /// for use in the current block. It resets to EmitStartPt when it makes sense
+  /// (for example, it's usually profitable to avoid function calls between the
+  /// definition and the use)
+  MachineInstr *LastLocalValue;
+
+  /// \brief The top most instruction in the current block that is allowed for
+  /// emitting local variables. LastLocalValue resets to EmitStartPt when it
+  /// makes sense (for example, on function calls)
+  MachineInstr *EmitStartPt;
+
+public:
+  virtual ~FastISel();
+
+  /// \brief Return the position of the last instruction emitted for
+  /// materializing constants for use in the current block.
+  MachineInstr *getLastLocalValue() { return LastLocalValue; }
+
+  /// \brief Update the position of the last instruction emitted for
+  /// materializing constants for use in the current block.
+  void setLastLocalValue(MachineInstr *I) {
+    EmitStartPt = I;
+    LastLocalValue = I;
+  }
+
+  /// \brief Set the current block to which generated machine instructions will
+  /// be appended.
+  void startNewBlock();
+
+  /// Flush the local value map and sink local values if possible.
+  void finishBasicBlock();
+
+  /// \brief Return current debug location information.
+  DebugLoc getCurDebugLoc() const { return DbgLoc; }
+
+  /// \brief Do "fast" instruction selection for function arguments and append
+  /// the machine instructions to the current block. Returns true when
+  /// successful.
+  bool lowerArguments();
+
+  /// \brief Do "fast" instruction selection for the given LLVM IR instruction
+  /// and append the generated machine instructions to the current block.
+  /// Returns true if selection was successful.
+  bool selectInstruction(const Instruction *I);
+
+  /// \brief Do "fast" instruction selection for the given LLVM IR operator
+  /// (Instruction or ConstantExpr), and append generated machine instructions
+  /// to the current block. Return true if selection was successful.
+  bool selectOperator(const User *I, unsigned Opcode);
+
+  /// \brief Create a virtual register and arrange for it to be assigned the
+  /// value for the given LLVM value.
+  unsigned getRegForValue(const Value *V);
+
+  /// \brief Look up the value to see if its value is already cached in a
+  /// register. It may be defined by instructions across blocks or defined
+  /// locally.
+  unsigned lookUpRegForValue(const Value *V);
+
+  /// \brief This is a wrapper around getRegForValue that also takes care of
+  /// truncating or sign-extending the given getelementptr index value.
+  std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+
+  /// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
+  /// that we could have a sequence where multiple LLVM IR instructions are
+  /// folded into the same machineinstr.  For example we could have:
+  ///
+  ///   A: x = load i32 *P
+  ///   B: y = icmp A, 42
+  ///   C: br y, ...
+  ///
+  /// In this scenario, \p LI is "A", and \p FoldInst is "C".  We know about "B"
+  /// (and any other folded instructions) because it is between A and C.
+  ///
+  /// If we succeed folding, return true.
+  bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
+
+  /// \brief The specified machine instr operand is a vreg, and that vreg is
+  /// being provided by the specified load instruction.  If possible, try to
+  /// fold the load as an operand to the instruction, returning true if
+  /// possible.
+  ///
+  /// This method should be implemented by targets.
+  virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
+                                   const LoadInst * /*LI*/) {
+    return false;
+  }
+
+  /// \brief Reset InsertPt to prepare for inserting instructions into the
+  /// current block.
+  void recomputeInsertPt();
+
+  /// \brief Remove all dead instructions between the I and E.
+  void removeDeadCode(MachineBasicBlock::iterator I,
+                      MachineBasicBlock::iterator E);
+
+  struct SavePoint {
+    MachineBasicBlock::iterator InsertPt;
+    DebugLoc DL;
+  };
+
+  /// \brief Prepare InsertPt to begin inserting instructions into the local
+  /// value area and return the old insert position.
+  SavePoint enterLocalValueArea();
+
+  /// \brief Reset InsertPt to the given old insert position.
+  void leaveLocalValueArea(SavePoint Old);
+
+protected:
+  explicit FastISel(FunctionLoweringInfo &FuncInfo,
+                    const TargetLibraryInfo *LibInfo,
+                    bool SkipTargetIndependentISel = false);
+
+  /// \brief This method is called by target-independent code when the normal
+  /// FastISel process fails to select an instruction. This gives targets a
+  /// chance to emit code for anything that doesn't fit into FastISel's
+  /// framework. It returns true if it was successful.
+  virtual bool fastSelectInstruction(const Instruction *I) = 0;
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific argument lowering. It returns true if it was successful.
+  virtual bool fastLowerArguments();
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific call lowering. It returns true if it was successful.
+  virtual bool fastLowerCall(CallLoweringInfo &CLI);
+
+  /// \brief This method is called by target-independent code to do target-
+  /// specific intrinsic lowering. It returns true if it was successful.
+  virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type and opcode be emitted.
+  virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register operand be emitted.
+  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                              bool Op0IsKill);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register operands be emitted.
+  virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                               bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and register and immediate
+  /// operands be emitted.
+  virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+                               bool Op0IsKill, uint64_t Imm);
+
+  /// \brief This method is a wrapper of fastEmit_ri.
+  ///
+  /// It first tries to emit an instruction with an immediate operand using
+  /// fastEmit_ri.  If that fails, it materializes the immediate into a register
+  /// and tries fastEmit_rr instead.
+  unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
+                        uint64_t Imm, MVT ImmType);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and immediate operand be emitted.
+  virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
+
+  /// \brief This method is called by target-independent code to request that an
+  /// instruction with the given type, opcode, and floating-point immediate
+  /// operand be emitted.
+  virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
+                              const ConstantFP *FPImm);
+
+  /// \brief Emit a MachineInstr with no operands and a result register in the
+  /// given register class.
+  unsigned fastEmitInst_(unsigned MachineInstOpcode,
+                         const TargetRegisterClass *RC);
+
+  /// \brief Emit a MachineInstr with one register operand and a result register
+  /// in the given register class.
+  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
+                          const TargetRegisterClass *RC, unsigned Op0,
+                          bool Op0IsKill);
+
+  /// \brief Emit a MachineInstr with two register operands and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
+                           const TargetRegisterClass *RC, unsigned Op0,
+                           bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+  /// \brief Emit a MachineInstr with three register operands and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+                            unsigned Op2, bool Op2IsKill);
+
+  /// \brief Emit a MachineInstr with a register operand, an immediate, and a
+  /// result register in the given register class.
+  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
+                           const TargetRegisterClass *RC, unsigned Op0,
+                           bool Op0IsKill, uint64_t Imm);
+
+  /// \brief Emit a MachineInstr with one register operand and two immediate
+  /// operands.
+  unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
+
+  /// \brief Emit a MachineInstr with a floating point immediate, and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_f(unsigned MachineInstOpcode,
+                          const TargetRegisterClass *RC,
+                          const ConstantFP *FPImm);
+
+  /// \brief Emit a MachineInstr with two register operands, an immediate, and a
+  /// result register in the given register class.
+  unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
+                            const TargetRegisterClass *RC, unsigned Op0,
+                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+                            uint64_t Imm);
+
+  /// \brief Emit a MachineInstr with a single immediate operand, and a result
+  /// register in the given register class.
+  unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
+                          const TargetRegisterClass *RC, uint64_t Imm);
+
+  /// \brief Emit a MachineInstr for an extract_subreg from a specified index of
+  /// a superregister to a specified type.
+  unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
+                                      uint32_t Idx);
+
+  /// \brief Emit MachineInstrs to compute the value of Op with all but the
+  /// least significant bit set to zero.
+  unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+
+  /// \brief Emit an unconditional branch to the given block, unless it is the
+  /// immediate (fall-through) successor, and update the CFG.
+  void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL);
+
+  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
+  /// and add TrueMBB and FalseMBB to the successor list.
+  void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
+                        MachineBasicBlock *FalseMBB);
+
+  /// \brief Update the value map to include the new mapping for this
+  /// instruction, or insert an extra copy to get the result in a previous
+  /// determined register.
+  ///
+  /// NOTE: This is only necessary because we might select a block that uses a
+  /// value before we select the block that defines the value. It might be
+  /// possible to fix this by selecting blocks in reverse postorder.
+  void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
+
+  unsigned createResultReg(const TargetRegisterClass *RC);
+
+  /// \brief Try to constrain Op so that it is usable by argument OpNum of the
+  /// provided MCInstrDesc. If this fails, create a new virtual register in the
+  /// correct class and COPY the value there.
+  unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
+                                    unsigned OpNum);
+
+  /// \brief Emit a constant in a register using target-specific logic, such as
+  /// constant pool loads.
+  virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
+
+  /// \brief Emit an alloca address in a register using target-specific logic.
+  virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
+
+  /// \brief Emit the floating-point constant +0.0 in a register using target-
+  /// specific logic.
+  virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
+    return 0;
+  }
+
+  /// \brief Check if \c Add is an add that can be safely folded into \c GEP.
+  ///
+  /// \c Add can be folded into \c GEP if:
+  /// - \c Add is an add,
+  /// - \c Add's size matches \c GEP's,
+  /// - \c Add is in the same basic block as \c GEP, and
+  /// - \c Add has a constant operand.
+  bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
+
+  /// \brief Test whether the given value has exactly one use.
+  bool hasTrivialKill(const Value *V);
+
+  /// \brief Create a machine mem operand from the given instruction.
+  MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
+
+  CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
+
+  bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
+  bool lowerCallTo(const CallInst *CI, const char *SymbolName,
+                   unsigned NumArgs);
+  bool lowerCallTo(CallLoweringInfo &CLI);
+
+  bool isCommutativeIntrinsic(IntrinsicInst const *II) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::sadd_with_overflow:
+    case Intrinsic::uadd_with_overflow:
+    case Intrinsic::smul_with_overflow:
+    case Intrinsic::umul_with_overflow:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  bool lowerCall(const CallInst *I);
+  /// \brief Select and emit code for a binary operator instruction, which has
+  /// an opcode which directly corresponds to the given ISD opcode.
+  bool selectBinaryOp(const User *I, unsigned ISDOpcode);
+  bool selectFNeg(const User *I);
+  bool selectGetElementPtr(const User *I);
+  bool selectStackmap(const CallInst *I);
+  bool selectPatchpoint(const CallInst *I);
+  bool selectCall(const User *Call);
+  bool selectIntrinsicCall(const IntrinsicInst *II);
+  bool selectBitCast(const User *I);
+  bool selectCast(const User *I, unsigned Opcode);
+  bool selectExtractValue(const User *I);
+  bool selectInsertValue(const User *I);
+  bool selectXRayCustomEvent(const CallInst *II);
+
+private:
+  /// \brief Handle PHI nodes in successor blocks.
+  ///
+  /// Emit code to ensure constants are copied into registers when needed.
+  /// Remember the virtual registers that need to be added to the Machine PHI
+  /// nodes as input.  We cannot just directly add them, because expansion might
+  /// result in multiple MBB's for one BB.  As such, the start of the BB might
+  /// correspond to a different MBB than the end.
+  bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
+
+  /// \brief Helper for materializeRegForValue to materialize a constant in a
+  /// target-independent way.
+  unsigned materializeConstant(const Value *V, MVT VT);
+
+  /// \brief Helper for getRegForValue. This function is called when the value
+  /// isn't already available in a register and must be materialized with new
+  /// instructions.
+  unsigned materializeRegForValue(const Value *V, MVT VT);
+
+  /// \brief Clears LocalValueMap and moves the area for the new local variables
+  /// to the beginning of the block. It helps to avoid spilling cached variables
+  /// across heavy instructions like calls.
+  void flushLocalValueMap();
+
+  /// \brief Removes dead local value instructions after SavedLastLocalValue.
+  void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
+
+  struct InstOrderMap {
+    DenseMap<MachineInstr *, unsigned> Orders;
+    MachineInstr *FirstTerminator = nullptr;
+    unsigned FirstTerminatorOrder = std::numeric_limits<unsigned>::max();
+
+    void initialize(MachineBasicBlock *MBB);
+  };
+
+  /// Sinks the local value materialization instruction LocalMI to its first use
+  /// in the basic block, or deletes it if it is not used.
+  void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
+                                     InstOrderMap &OrderMap);
+
+  /// \brief Insertion point before trying to select the current instruction.
+  MachineBasicBlock::iterator SavedInsertPt;
+
+  /// \brief Add a stackmap or patchpoint intrinsic call's live variable
+  /// operands to a stackmap or patchpoint machine instruction.
+  bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
+                           const CallInst *CI, unsigned StartIdx);
+  bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
+                         const Value *Callee, bool ForceRetVoidTy,
+                         CallLoweringInfo &CLI);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FASTISEL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h b/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h
new file mode 100644
index 0000000..55e25c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FaultMaps.h
@@ -0,0 +1,218 @@
+//===- FaultMaps.h - The "FaultMaps" section --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FAULTMAPS_H
+#define LLVM_CODEGEN_FAULTMAPS_H
+
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Endian.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class raw_ostream;
+
+class FaultMaps {
+public:
+  enum FaultKind {
+    FaultingLoad = 1,
+    FaultingLoadStore,
+    FaultingStore,
+    FaultKindMax
+  };
+
+  explicit FaultMaps(AsmPrinter &AP);
+
+  static const char *faultTypeToString(FaultKind);
+
+  void recordFaultingOp(FaultKind FaultTy, const MCSymbol *HandlerLabel);
+  void serializeToFaultMapSection();
+  void reset() {
+    FunctionInfos.clear();
+  }
+
+private:
+  static const char *WFMP;
+
+  struct FaultInfo {
+    FaultKind Kind = FaultKindMax;
+    const MCExpr *FaultingOffsetExpr = nullptr;
+    const MCExpr *HandlerOffsetExpr = nullptr;
+
+    FaultInfo() = default;
+
+    explicit FaultInfo(FaultMaps::FaultKind Kind, const MCExpr *FaultingOffset,
+                       const MCExpr *HandlerOffset)
+        : Kind(Kind), FaultingOffsetExpr(FaultingOffset),
+          HandlerOffsetExpr(HandlerOffset) {}
+  };
+
+  using FunctionFaultInfos = std::vector<FaultInfo>;
+
+  // We'd like to keep a stable iteration order for FunctionInfos to help
+  // FileCheck based testing.
+  struct MCSymbolComparator {
+    bool operator()(const MCSymbol *LHS, const MCSymbol *RHS) const {
+      return LHS->getName() < RHS->getName();
+    }
+  };
+
+  std::map<const MCSymbol *, FunctionFaultInfos, MCSymbolComparator>
+      FunctionInfos;
+  AsmPrinter &AP;
+
+  void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
+};
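+
+/// A minimal usage sketch (illustrative only; AP and HandlerLabel are assumed
+/// to be provided by the surrounding AsmPrinter code):
+///
+///   FaultMaps FM(AP);
+///   // While emitting a faulting load, record it against its handler label:
+///   FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
+///   // Once per module, emit the accumulated __llvm_faultmaps section:
+///   FM.serializeToFaultMapSection();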
+
+/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
+/// above.  This parser is version locked with the __llvm_faultmaps section
+/// generated by the version of LLVM that includes it.  No guarantees are made
+/// with respect to forward or backward compatibility.
+class FaultMapParser {
+  using FaultMapVersionType = uint8_t;
+  using Reserved0Type = uint8_t;
+  using Reserved1Type = uint16_t;
+  using NumFunctionsType = uint32_t;
+
+  static const size_t FaultMapVersionOffset = 0;
+  static const size_t Reserved0Offset =
+      FaultMapVersionOffset + sizeof(FaultMapVersionType);
+  static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
+  static const size_t NumFunctionsOffset =
+      Reserved1Offset + sizeof(Reserved1Type);
+  static const size_t FunctionInfosOffset =
+      NumFunctionsOffset + sizeof(NumFunctionsType);
+
+  const uint8_t *P;
+  const uint8_t *E;
+
+  template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
+    assert(P + sizeof(T) <= E && "out of bounds read!");
+    return support::endian::read<T, support::little, 1>(P);
+  }
+
+public:
+  class FunctionFaultInfoAccessor {
+    using FaultKindType = uint32_t;
+    using FaultingPCOffsetType = uint32_t;
+    using HandlerPCOffsetType = uint32_t;
+
+    static const size_t FaultKindOffset = 0;
+    static const size_t FaultingPCOffsetOffset =
+        FaultKindOffset + sizeof(FaultKindType);
+    static const size_t HandlerPCOffsetOffset =
+        FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
+
+    const uint8_t *P;
+    const uint8_t *E;
+
+  public:
+    static const size_t Size =
+        HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);
+
+    explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
+        : P(P), E(E) {}
+
+    FaultKindType getFaultKind() const {
+      return read<FaultKindType>(P + FaultKindOffset, E);
+    }
+
+    FaultingPCOffsetType getFaultingPCOffset() const {
+      return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
+    }
+
+    HandlerPCOffsetType getHandlerPCOffset() const {
+      return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
+    }
+  };
+
+  class FunctionInfoAccessor {
+    using FunctionAddrType = uint64_t;
+    using NumFaultingPCsType = uint32_t;
+    using ReservedType = uint32_t;
+
+    static const size_t FunctionAddrOffset = 0;
+    static const size_t NumFaultingPCsOffset =
+        FunctionAddrOffset + sizeof(FunctionAddrType);
+    static const size_t ReservedOffset =
+        NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
+    static const size_t FunctionFaultInfosOffset =
+        ReservedOffset + sizeof(ReservedType);
+    static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
+
+    const uint8_t *P = nullptr;
+    const uint8_t *E = nullptr;
+
+  public:
+    FunctionInfoAccessor() = default;
+
+    explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
+        : P(P), E(E) {}
+
+    FunctionAddrType getFunctionAddr() const {
+      return read<FunctionAddrType>(P + FunctionAddrOffset, E);
+    }
+
+    NumFaultingPCsType getNumFaultingPCs() const {
+      return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
+    }
+
+    FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
+      assert(Index < getNumFaultingPCs() && "index out of bounds!");
+      const uint8_t *Begin = P + FunctionFaultInfosOffset +
+                             FunctionFaultInfoAccessor::Size * Index;
+      return FunctionFaultInfoAccessor(Begin, E);
+    }
+
+    FunctionInfoAccessor getNextFunctionInfo() const {
+      size_t MySize = FunctionInfoHeaderSize +
+                      getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;
+
+      const uint8_t *Begin = P + MySize;
+      assert(Begin < E && "out of bounds!");
+      return FunctionInfoAccessor(Begin, E);
+    }
+  };
+
+  explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
+      : P(Begin), E(End) {}
+
+  FaultMapVersionType getFaultMapVersion() const {
+    auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
+    assert(Version == 1 && "only version 1 supported!");
+    return Version;
+  }
+
+  NumFunctionsType getNumFunctions() const {
+    return read<NumFunctionsType>(P + NumFunctionsOffset, E);
+  }
+
+  FunctionInfoAccessor getFirstFunctionInfo() const {
+    const uint8_t *Begin = P + FunctionInfosOffset;
+    return FunctionInfoAccessor(Begin, E);
+  }
+};
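+
+/// A minimal usage sketch (illustrative; Begin/End are assumed to delimit the
+/// raw section contents in memory):
+///
+///   FaultMapParser Parser(Begin, End);
+///   auto FI = Parser.getFirstFunctionInfo();
+///   for (uint32_t F = 0, NF = Parser.getNumFunctions(); F != NF; ++F) {
+///     for (uint32_t I = 0, NP = FI.getNumFaultingPCs(); I != NP; ++I)
+///       (void)FI.getFunctionFaultInfoAt(I).getHandlerPCOffset();
+///     if (F + 1 != NF)
+///       FI = FI.getNextFunctionInfo();
+///   }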
+
+raw_ostream &
+operator<<(raw_ostream &OS, const FaultMapParser::FunctionFaultInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS,
+                        const FaultMapParser::FunctionInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FAULTMAPS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
new file mode 100644
index 0000000..2da00b7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -0,0 +1,325 @@
+//===- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements routines for translating functions from LLVM IR into
+// Machine IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/KnownBits.h"
+#include <cassert>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Argument;
+class BasicBlock;
+class BranchProbabilityInfo;
+class Function;
+class Instruction;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class MVT;
+class SelectionDAG;
+class TargetLowering;
+
+//===--------------------------------------------------------------------===//
+/// FunctionLoweringInfo - This contains information that is global to a
+/// function that is used when lowering a region of the function.
+///
+class FunctionLoweringInfo {
+public:
+  const Function *Fn;
+  MachineFunction *MF;
+  const TargetLowering *TLI;
+  MachineRegisterInfo *RegInfo;
+  BranchProbabilityInfo *BPI;
+  /// CanLowerReturn - true iff the function's return value can be lowered to
+  /// registers.
+  bool CanLowerReturn;
+
+  /// True if part of the CSRs will be handled via explicit copies.
+  bool SplitCSR;
+
+  /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
+  /// allocated to hold a pointer to the hidden sret parameter.
+  unsigned DemoteRegister;
+
+  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
+  DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+
+  /// A map from swifterror value in a basic block to the virtual register it is
+  /// currently represented by.
+  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+      SwiftErrorVRegDefMap;
+
+  /// A list of upward exposed vreg uses that need to be satisfied by either a
+  /// copy def or a phi node at the beginning of the basic block representing
+  /// the predecessors' swifterror value.
+  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+      SwiftErrorVRegUpwardsUse;
+
+  /// A map from instructions that define/use a swifterror value to the virtual
+  /// register that represents that def/use.
+  llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
+      SwiftErrorVRegDefUses;
+
+  /// The swifterror argument of the current function.
+  const Value *SwiftErrorArg;
+
+  using SwiftErrorValues = SmallVector<const Value*, 1>;
+  /// A function can only have a single swifterror argument, and if it does
+  /// have one, it must be the first entry in SwiftErrorVals.
+  SwiftErrorValues SwiftErrorVals;
+
+  /// Get or create the swifterror value virtual register in
+  /// SwiftErrorVRegDefMap for this basic block.
+  unsigned getOrCreateSwiftErrorVReg(const MachineBasicBlock *,
+                                     const Value *);
+
+  /// Set the swifterror virtual register in the SwiftErrorVRegDefMap for this
+  /// basic block.
+  void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *,
+                                unsigned);
+
+  /// Get or create the swifterror value virtual register for a def of a
+  /// swifterror by an instruction.
+  std::pair<unsigned, bool> getOrCreateSwiftErrorVRegDefAt(const Instruction *);
+  std::pair<unsigned, bool>
+  getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *,
+                                 const Value *);
+
+  /// ValueMap - Since we emit code for the function a basic block at a time,
+  /// we must remember which virtual registers hold the values for
+  /// cross-basic-block values.
+  DenseMap<const Value *, unsigned> ValueMap;
+
+  /// The VirtReg2Value map is needed by the divergence-analysis-driven
+  /// instruction selection. It is the inverse of ValueMap and is computed
+  /// lazily, on demand. It is used to get the Value corresponding to a
+  /// live-in virtual register and is queried from
+  /// TargetLowering::isSDNodeSourceOfDivergence.
+  DenseMap<unsigned, const Value*> VirtReg2Value;
+
+  /// This method is called from TargetLowering::isSDNodeSourceOfDivergence
+  /// to get the Value corresponding to the live-in virtual register.
+  const Value * getValueFromVirtualReg(unsigned Vreg);
+
+  /// Track virtual registers created for exception pointers.
+  DenseMap<const Value *, unsigned> CatchPadExceptionPointers;
+
+  /// Keep track of frame indices allocated for statepoints as they could be
+  /// used across basic block boundaries.  This struct is more complex than a
+  /// simple map because the statepoint lowering code de-duplicates gc pointers
+  /// based on their SDValue (so %p and (bitcast %p to T) will get the same
+  /// slot), and we track that here.
+
+  struct StatepointSpillMap {
+    using SlotMapTy = DenseMap<const Value *, Optional<int>>;
+
+    /// Maps uniqued llvm IR values to the slots they were spilled in.  If a
+    /// value is mapped to None it means we visited the value but didn't spill
+    /// it (because it was a constant, for instance).
+    SlotMapTy SlotMap;
+
+    /// Maps llvm IR values to the values they were de-duplicated to.
+    DenseMap<const Value *, const Value *> DuplicateMap;
+
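+    /// Look up the spill slot entry for \p V, first canonicalizing \p V
+    /// through DuplicateMap. For example (illustrative): if DuplicateMap maps
+    /// (bitcast %p to T) back to %p, a lookup of the bitcast is redirected to
+    /// %p, so both values resolve to the same SlotMap entry.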
+    SlotMapTy::const_iterator find(const Value *V) const {
+      auto DuplIt = DuplicateMap.find(V);
+      if (DuplIt != DuplicateMap.end())
+        V = DuplIt->second;
+      return SlotMap.find(V);
+    }
+
+    SlotMapTy::const_iterator end() const { return SlotMap.end(); }
+  };
+
+  /// Maps gc.statepoint instructions to their corresponding StatepointSpillMap
+  /// instances.
+  DenseMap<const Instruction *, StatepointSpillMap> StatepointSpillMaps;
+
+  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
+  /// the entry block.  This allows the allocas to be efficiently referenced
+  /// anywhere in the function.
+  DenseMap<const AllocaInst*, int> StaticAllocaMap;
+
+  /// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
+  DenseMap<const Argument*, int> ByValArgFrameIndexMap;
+
+  /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
+  /// function arguments that are inserted after scheduling is completed.
+  SmallVector<MachineInstr*, 8> ArgDbgValues;
+
+  /// RegFixups - Registers which need to be replaced after isel is done.
+  DenseMap<unsigned, unsigned> RegFixups;
+
+  DenseSet<unsigned> RegsWithFixups;
+
+  /// StatepointStackSlots - A list of temporary stack slots (frame indices)
+  /// used to spill values at a statepoint.  We store them here to enable
+  /// reuse of the same stack slots across different statepoints in different
+  /// basic blocks.
+  SmallVector<unsigned, 50> StatepointStackSlots;
+
+  /// MBB - The current block.
+  MachineBasicBlock *MBB;
+
+  /// InsertPt - The current insert position inside the current block.
+  MachineBasicBlock::iterator InsertPt;
+
+  struct LiveOutInfo {
+    unsigned NumSignBits : 31;
+    unsigned IsValid : 1;
+    KnownBits Known = 1;
+
+    LiveOutInfo() : NumSignBits(0), IsValid(true) {}
+  };
+
+  /// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
+  /// for a value.
+  DenseMap<const Value *, ISD::NodeType> PreferredExtendType;
+
+  /// VisitedBBs - The set of basic blocks visited thus far by instruction
+  /// selection.
+  SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
+
+  /// PHINodesToUpdate - A list of phi instructions whose operand list will
+  /// be updated after processing the current basic block.
+  /// TODO: This isn't per-function state, it's per-basic-block state. But
+  /// there's no other convenient place for it to live right now.
+  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+  unsigned OrigNumPHINodesToUpdate;
+
+  /// If the current MBB is a landing pad, the exception pointer and exception
+  /// selector registers are copied into these virtual registers by
+  /// SelectionDAGISel::PrepareEHLandingPad().
+  unsigned ExceptionPointerVirtReg, ExceptionSelectorVirtReg;
+
+  /// set - Initialize this FunctionLoweringInfo with the given Function
+  /// and its associated MachineFunction.
+  ///
+  void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG);
+
+  /// clear - Clear out all the function-specific state. This returns this
+  /// FunctionLoweringInfo to an empty state, ready to be used for a
+  /// different function.
+  void clear();
+
+  /// isExportedInst - Return true if the specified value is an instruction
+  /// exported from its block.
+  bool isExportedInst(const Value *V) {
+    return ValueMap.count(V);
+  }
+
+  unsigned CreateReg(MVT VT);
+
+  unsigned CreateRegs(Type *Ty);
+
+  unsigned InitializeRegForValue(const Value *V) {
+    // Tokens never live in vregs.
+    if (V->getType()->isTokenTy())
+      return 0;
+    unsigned &R = ValueMap[V];
+    assert(R == 0 && "Already initialized this value register!");
+    return R = CreateRegs(V->getType());
+  }
+
+  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+  /// register is a PHI destination and the PHI's LiveOutInfo is not valid.
+  const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
+    if (!LiveOutRegInfo.inBounds(Reg))
+      return nullptr;
+
+    const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
+    if (!LOI->IsValid)
+      return nullptr;
+
+    return LOI;
+  }
+
+  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+  /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
+  /// the register's LiveOutInfo is for a smaller bit width, it is extended to
+  /// the larger bit width by zero extension. The bit width must be no smaller
+  /// than the LiveOutInfo's existing bit width.
+  const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth);
+
+  /// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
+  void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
+                         const KnownBits &Known) {
+    // Only install this information if it tells us something.
+    if (NumSignBits == 1 && Known.isUnknown())
+      return;
+
+    LiveOutRegInfo.grow(Reg);
+    LiveOutInfo &LOI = LiveOutRegInfo[Reg];
+    LOI.NumSignBits = NumSignBits;
+    LOI.Known.One = Known.One;
+    LOI.Known.Zero = Known.Zero;
+  }
+
+  /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
+  /// register based on the LiveOutInfo of its operands.
+  void ComputePHILiveOutRegInfo(const PHINode*);
+
+  /// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
+  /// called when a block is visited before all of its predecessors.
+  void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
+    // PHIs with no uses have no ValueMap entry.
+    DenseMap<const Value*, unsigned>::const_iterator It = ValueMap.find(PN);
+    if (It == ValueMap.end())
+      return;
+
+    unsigned Reg = It->second;
+    if (Reg == 0)
+      return;
+
+    LiveOutRegInfo.grow(Reg);
+    LiveOutRegInfo[Reg].IsValid = false;
+  }
+
+  /// setArgumentFrameIndex - Record frame index for the byval
+  /// argument.
+  void setArgumentFrameIndex(const Argument *A, int FI);
+
+  /// getArgumentFrameIndex - Get frame index for the byval argument.
+  int getArgumentFrameIndex(const Argument *A);
+
+  unsigned getCatchPadExceptionPointerVReg(const Value *CPI,
+                                           const TargetRegisterClass *RC);
+
+private:
+  void addSEHHandlersForLPads(ArrayRef<const LandingPadInst *> LPads);
+
+  /// LiveOutRegInfo - Information about live out vregs.
+  IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h b/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h
new file mode 100644
index 0000000..ad2599f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCMetadata.h
@@ -0,0 +1,207 @@
+//===- GCMetadata.h - Garbage collector metadata ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
+// used as a communication channel from the target code generator to the target
+// garbage collectors. This interface allows code generators and garbage
+// collectors to be developed independently.
+//
+// The GCFunctionInfo class logs the data necessary to build a type accurate
+// stack map. The code generator outputs:
+//
+//   - Safe points as specified by the GCStrategy's NeededSafePoints.
+//   - Stack offsets for GC roots, as specified by calls to llvm.gcroot
+//
+// As a refinement, liveness analysis calculates the set of live roots at each
+// safe point. Liveness analysis is not presently performed by the code
+// generator, so all roots are assumed live.
+//
+// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
+// they are compiled. This accretion is necessary for collectors which must emit
+// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
+// outlives the MachineFunction from which it is derived and must not refer to
+// any code generator data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATA_H
+#define LLVM_CODEGEN_GCMETADATA_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+class MCSymbol;
+
+/// GCPoint - Metadata for a collector-safe point in machine code.
+///
+struct GCPoint {
+  GC::PointKind Kind; ///< The kind of the safe point.
+  MCSymbol *Label;    ///< A label.
+  DebugLoc Loc;
+
+  GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
+      : Kind(K), Label(L), Loc(std::move(DL)) {}
+};
+
+/// GCRoot - Metadata for a pointer to an object managed by the garbage
+/// collector.
+struct GCRoot {
+  int Num;                  ///< Usually a frame index.
+  int StackOffset = -1;     ///< Offset from the stack pointer.
+  const Constant *Metadata; ///< Metadata straight from the call
+                            ///< to llvm.gcroot.
+
+  GCRoot(int N, const Constant *MD) : Num(N), Metadata(MD) {}
+};
+
+/// Garbage collection metadata for a single function.  Currently, this
+/// information only applies to GCStrategies which use GCRoot.
+class GCFunctionInfo {
+public:
+  using iterator = std::vector<GCPoint>::iterator;
+  using roots_iterator = std::vector<GCRoot>::iterator;
+  using live_iterator = std::vector<GCRoot>::const_iterator;
+
+private:
+  const Function &F;
+  GCStrategy &S;
+  uint64_t FrameSize;
+  std::vector<GCRoot> Roots;
+  std::vector<GCPoint> SafePoints;
+
+  // FIXME: Liveness. A 2D BitVector, perhaps?
+  //
+  //   BitVector Liveness;
+  //
+  //   bool islive(int point, int root) =
+  //     Liveness[point * SafePoints.size() + root]
+  //
+  // The bit vector is the more compact representation where >3.2% of roots
+  // are live per safe point (1.5% on 64-bit hosts).
+
+public:
+  GCFunctionInfo(const Function &F, GCStrategy &S);
+  ~GCFunctionInfo();
+
+  /// getFunction - Return the function to which this metadata applies.
+  const Function &getFunction() const { return F; }
+
+  /// getStrategy - Return the GC strategy for the function.
+  GCStrategy &getStrategy() { return S; }
+
+  /// addStackRoot - Registers a root that lives on the stack. Num is the
+  ///                stack object ID for the alloca (if the code generator is
+  ///                using MachineFrameInfo).
+  void addStackRoot(int Num, const Constant *Metadata) {
+    Roots.push_back(GCRoot(Num, Metadata));
+  }
+
+  /// removeStackRoot - Removes a root.
+  roots_iterator removeStackRoot(roots_iterator position) {
+    return Roots.erase(position);
+  }
+
+  /// addSafePoint - Notes the existence of a safe point. Label is the symbol
+  /// emitted just prior to the safe point (if the code generator is using
+  /// MachineModuleInfo).
+  void addSafePoint(GC::PointKind Kind, MCSymbol *Label, const DebugLoc &DL) {
+    SafePoints.emplace_back(Kind, Label, DL);
+  }
+
+  /// getFrameSize/setFrameSize - Records the function's frame size.
+  uint64_t getFrameSize() const { return FrameSize; }
+  void setFrameSize(uint64_t S) { FrameSize = S; }
+
+  /// begin/end - Iterators for safe points.
+  iterator begin() { return SafePoints.begin(); }
+  iterator end() { return SafePoints.end(); }
+  size_t size() const { return SafePoints.size(); }
+
+  /// roots_begin/roots_end - Iterators for all roots in the function.
+  roots_iterator roots_begin() { return Roots.begin(); }
+  roots_iterator roots_end() { return Roots.end(); }
+  size_t roots_size() const { return Roots.size(); }
+
+  /// live_begin/live_end - Iterators for live roots at a given safe point.
+  live_iterator live_begin(const iterator &p) { return roots_begin(); }
+  live_iterator live_end(const iterator &p) { return roots_end(); }
+  size_t live_size(const iterator &p) const { return roots_size(); }
+};
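+
+/// A small illustrative sketch (the names are hypothetical): a code generator
+/// records roots and safe points for a function roughly like this:
+///
+///   GCFunctionInfo &FI = GMI.getFunctionInfo(F); // GMI is a GCModuleInfo
+///   FI.addStackRoot(FrameIdx, RootMetadata);
+///   FI.addSafePoint(GC::PostCall, Label, DL);
+///   for (GCPoint &P : FI)   // begin()/end() iterate the safe points
+///     (void)P.Label;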
+
+/// An analysis pass which caches information about the entire Module.
+/// Records both the function level information used by GCRoots and a
+/// cache of the 'active' gc strategy objects for the current Module.
+class GCModuleInfo : public ImmutablePass {
+  /// An owning list of all GCStrategies which have been created
+  SmallVector<std::unique_ptr<GCStrategy>, 1> GCStrategyList;
+  /// A helper map to speedup lookups into the above list
+  StringMap<GCStrategy*> GCStrategyMap;
+
+public:
+  /// Look up the GCStrategy object associated with the given gc name.
+  /// Objects are owned internally; no caller should attempt to delete the
+  /// returned objects.
+  GCStrategy *getGCStrategy(const StringRef Name);
+
+  /// List of per-function info objects.  In theory, each of these
+  /// may be associated with a different GC.
+  using FuncInfoVec = std::vector<std::unique_ptr<GCFunctionInfo>>;
+
+  FuncInfoVec::iterator funcinfo_begin() { return Functions.begin(); }
+  FuncInfoVec::iterator funcinfo_end() { return Functions.end(); }
+
+private:
+  /// Owning list of all GCFunctionInfos associated with this Module
+  FuncInfoVec Functions;
+
+  /// Non-owning map to bypass linear search when finding the GCFunctionInfo
+  /// associated with a particular Function.
+  using finfo_map_type = DenseMap<const Function *, GCFunctionInfo *>;
+  finfo_map_type FInfoMap;
+
+public:
+  using iterator = SmallVector<std::unique_ptr<GCStrategy>, 1>::const_iterator;
+
+  static char ID;
+
+  GCModuleInfo();
+
+  /// clear - Resets the pass. Any pass that uses GCModuleInfo should call it
+  /// in doFinalization().
+  ///
+  void clear();
+
+  /// begin/end - Iterators for used strategies.
+  ///
+  iterator begin() const { return GCStrategyList.begin(); }
+  iterator end() const { return GCStrategyList.end(); }
+
+  /// getFunctionInfo - Look up function metadata.  This is currently assumed
+  /// to have the side effect of initializing the associated GCStrategy.  That
+  /// will soon change.
+  GCFunctionInfo &getFunctionInfo(const Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCMETADATA_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h b/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h
new file mode 100644
index 0000000..1cc69a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -0,0 +1,67 @@
+//===- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The abstract base class GCMetadataPrinter supports writing GC metadata tables
+// as assembly code. This is a separate class from GCStrategy in order to allow
+// users of the LLVM JIT to avoid linking with the AsmWriter.
+//
+// Subclasses of GCMetadataPrinter must be registered using the
+// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
+// because these subclasses are logically plugins for the AsmWriter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
+#define LLVM_CODEGEN_GCMETADATAPRINTER_H
+
+#include "llvm/Support/Registry.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class GCMetadataPrinter;
+class GCModuleInfo;
+class GCStrategy;
+class Module;
+
+/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
+/// defaults from Registry.
+using GCMetadataPrinterRegistry = Registry<GCMetadataPrinter>;
+
+/// GCMetadataPrinter - Emits GC metadata as assembly code.  Instances are
+/// created, managed, and owned by the AsmPrinter.
+class GCMetadataPrinter {
+private:
+  friend class AsmPrinter;
+
+  GCStrategy *S;
+
+protected:
+  // May only be subclassed.
+  GCMetadataPrinter();
+
+public:
+  GCMetadataPrinter(const GCMetadataPrinter &) = delete;
+  GCMetadataPrinter &operator=(const GCMetadataPrinter &) = delete;
+  virtual ~GCMetadataPrinter();
+
+  GCStrategy &getStrategy() { return *S; }
+
+  /// Called before the assembly for the module is generated by
+  /// the AsmPrinter (but after target specific hooks).
+  virtual void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+
+  /// Called after the assembly for the module is generated by
+  /// the AsmPrinter (but before target specific hooks)
+  virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCMETADATAPRINTER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
new file mode 100644
index 0000000..16168e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCStrategy.h
@@ -0,0 +1,181 @@
+//===- llvm/CodeGen/GCStrategy.h - Garbage collection -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCStrategy coordinates code generation algorithms and implements some itself
+// in order to generate code compatible with a target code generator as
+// specified in a function's 'gc' attribute. Algorithms are enabled by setting
+// flags in a subclass's constructor, and some virtual methods can be
+// overridden.
+//
+// GCStrategy is relevant for implementations using either gc.root or
+// gc.statepoint based lowering strategies, but is currently focused mostly on
+// options for gc.root.  This will change over time.
+//
+// When requested by a subclass of GCStrategy, the gc.root implementation will
+// populate GCModuleInfo and GCFunctionInfo with information about each
+// Function in the Module that opts in to garbage collection.  Specifically:
+//
+// - Safe points
+//   Garbage collection is generally only possible at certain points in code.
+//   GCStrategy can request that the collector insert such points:
+//
+//     - At and after any call to a subroutine
+//     - Before returning from the current function
+//     - Before backwards branches (loops)
+//
+// - Roots
+//   When a reference to a GC-allocated object exists on the stack, it must be
+//   stored in an alloca registered with llvm.gcroot.
+//
+// This information can be used to emit the metadata tables which are required
+// by the target garbage collector runtime.
+//
+// When used with gc.statepoint, information about safepoint and roots can be
+// found in the binary StackMap section after code generation.  Safepoint
+// placement is currently the responsibility of the frontend, though late
+// insertion support is planned.  gc.statepoint does not currently support
+// custom stack map formats; such can be generated by parsing the standard
+// stack map section if desired.
+//
+// The read and write barrier support can be used with either implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCSTRATEGY_H
+#define LLVM_CODEGEN_GCSTRATEGY_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Registry.h"
+#include <string>
+
+namespace llvm {
+
+class Type;
+
+namespace GC {
+
+/// PointKind - Used to indicate whether the address of the call instruction
+/// or the address after the call instruction is listed in the stackmap.  For
+/// most runtimes, PostCall safepoints are appropriate.
+///
+enum PointKind {
+  PreCall, ///< Instr is a call instruction.
+  PostCall ///< Instr is the return address of a call.
+};
+
+} // end namespace GC
+
+/// GCStrategy describes a garbage collector algorithm's code generation
+/// requirements, and provides overridable hooks for those needs which cannot
+/// be abstractly described.  GCStrategy objects must be looked up through
+/// the Function.  The objects themselves are owned by the Context and must
+/// be immutable.
+class GCStrategy {
+private:
+  friend class GCModuleInfo;
+
+  std::string Name;
+
+protected:
+  bool UseStatepoints = false; /// Uses gc.statepoints as opposed to gc.roots;
+                               /// if set, none of the other options can be
+                               /// anything but their default values.
+
+  unsigned NeededSafePoints = 0;    ///< Bitmask of required safe points.
+  bool CustomReadBarriers = false;  ///< Default is to insert loads.
+  bool CustomWriteBarriers = false; ///< Default is to insert stores.
+  bool CustomRoots = false;      ///< Default is to pass through to backend.
+  bool InitRoots = true;         ///< If set, roots are nulled during lowering.
+  bool UsesMetadata = false;     ///< If set, backend must emit metadata tables.
+
+public:
+  GCStrategy();
+  virtual ~GCStrategy() = default;
+
+  /// Return the name of the GC strategy.  This is the value of the collector
+  /// name string specified on functions which use this strategy.
+  const std::string &getName() const { return Name; }
+
+  /// By default, write barriers are replaced with simple store
+  /// instructions. If true, you must provide a custom pass to lower 
+  /// calls to @llvm.gcwrite.
+  bool customWriteBarrier() const { return CustomWriteBarriers; }
+
+  /// By default, read barriers are replaced with simple load
+  /// instructions. If true, you must provide a custom pass to lower 
+  /// calls to @llvm.gcread.
+  bool customReadBarrier() const { return CustomReadBarriers; }
+
+  /// Returns true if this strategy is expecting the use of gc.statepoints,
+  /// and false otherwise.
+  bool useStatepoints() const { return UseStatepoints; }
+
+  /** @name Statepoint Specific Properties */
+  ///@{
+
+  /// If the type specified can be reliably distinguished, returns true for
+  /// pointers to GC managed locations and false for pointers to non-GC
+  /// managed locations.  Note a GCStrategy can always return 'None' (i.e. an
+  /// empty optional) indicating it can't reliably distinguish.
+  virtual Optional<bool> isGCManagedPointer(const Type *Ty) const {
+    return None;
+  }
+  ///@}
+
+  /** @name GCRoot Specific Properties
+   * These properties and overrides only apply to collector strategies using
+   * GCRoot.
+   */
+  ///@{
+
+  /// True if safe points of any kind are required. By default, none are
+  /// recorded.
+  bool needsSafePoints() const { return NeededSafePoints != 0; }
+
+  /// True if the given kind of safe point is required. By default, none are
+  /// recorded.
+  bool needsSafePoint(GC::PointKind Kind) const {
+    return (NeededSafePoints & 1 << Kind) != 0;
+  }
+
+  /// By default, roots are left for the code generator so it can generate a
+  /// stack map. If true, you must provide a custom pass to lower 
+  /// calls to @llvm.gcroot.
+  bool customRoots() const { return CustomRoots; }
+
+  /// If set, gcroot intrinsics should initialize their allocas to null
+  /// before the first use. This is necessary for most GCs and is enabled by
+  /// default.
+  bool initializeRoots() const { return InitRoots; }
+
+  /// If set, appropriate metadata tables must be emitted by the back-end
+  /// (assembler, JIT, or otherwise). For statepoint, this method is
+  /// currently unsupported.  The stackmap information can be found in the
+  /// StackMap section as described in the documentation.
+  bool usesMetadata() const { return UsesMetadata; }
+
+  ///@}
+};
+
+/// Subclasses of GCStrategy are made available for use during compilation by
+/// adding them to the global GCRegistry.  This can done either within the
+/// LLVM source tree or via a loadable plugin.  An example registeration
+/// would be:
+/// static GCRegistry::Add<CustomGC> X("custom-name",
+///        "my custom supper fancy gc strategy");
+///
+/// Note that to use a custom GCMetadataPrinter w/gc.roots, you must also
+/// register your GCMetadataPrinter subclass with the
+/// GCMetadataPrinterRegistery as well.
+using GCRegistry = Registry<GCStrategy>;
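+
+/// As a further illustration (a hypothetical strategy, not shipped with
+/// LLVM), a subclass requesting post-call safe points and metadata tables
+/// would set the corresponding flags in its constructor:
+///
+///   class MyGC : public GCStrategy {
+///   public:
+///     MyGC() {
+///       NeededSafePoints = 1 << GC::PostCall;
+///       UsesMetadata = true;
+///     }
+///   };
+///   static GCRegistry::Add<MyGC> Y("my-gc", "an example strategy");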
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GCSTRATEGY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GCs.h b/linux-x64/clang/include/llvm/CodeGen/GCs.h
new file mode 100644
index 0000000..5207f80
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GCs.h
@@ -0,0 +1,46 @@
+//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains hack functions to force linking in the GC components.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCS_H
+#define LLVM_CODEGEN_GCS_H
+
+namespace llvm {
+class GCStrategy;
+class GCMetadataPrinter;
+
+/// FIXME: Collector instances are not useful on their own. These no longer
+///        serve any purpose except to link in the plugins.
+
+/// Creates a CoreCLR-compatible garbage collector.
+void linkCoreCLRGC();
+
+/// Creates an ocaml-compatible garbage collector.
+void linkOcamlGC();
+
+/// Creates an ocaml-compatible metadata printer.
+void linkOcamlGCPrinter();
+
+/// Creates an erlang-compatible garbage collector.
+void linkErlangGC();
+
+/// Creates an erlang-compatible metadata printer.
+void linkErlangGCPrinter();
+
+/// Creates a shadow stack garbage collector. This collector requires no code
+/// generator support.
+void linkShadowStackGC();
+
+void linkStatepointExampleGC();
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
new file mode 100644
index 0000000..8d91cc4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -0,0 +1,215 @@
+//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class MachineIRBuilder;
+class MachineOperand;
+struct MachinePointerInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Type;
+class Value;
+
+class CallLowering {
+  const TargetLowering *TLI;
+
+public:
+  struct ArgInfo {
+    unsigned Reg;
+    Type *Ty;
+    ISD::ArgFlagsTy Flags;
+    bool IsFixed;
+
+    ArgInfo(unsigned Reg, Type *Ty, ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{},
+            bool IsFixed = true)
+        : Reg(Reg), Ty(Ty), Flags(Flags), IsFixed(IsFixed) {}
+  };
+
+  /// Argument handling is mostly uniform between the four places that
+  /// make these decisions: function formal arguments, call
+  /// instruction args, call instruction returns and function
+  /// returns. However, once a decision has been made on where an
+  /// argument should go, exactly what happens can vary slightly. This
+  /// class abstracts the differences.
+  struct ValueHandler {
+    ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                 CCAssignFn *AssignFn)
+      : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn) {}
+
+    virtual ~ValueHandler() = default;
+
+    /// Materialize a VReg containing the address of the specified
+    /// stack-based object. This is either based on a FrameIndex or
+    /// direct SP manipulation, depending on the context. \p MPO
+    /// should be initialized to an appropriate description of the
+    /// address created.
+    virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+                                     MachinePointerInfo &MPO) = 0;
+
+    /// The specified value has been assigned to a physical register,
+    /// handle the appropriate COPY (either to or from) and mark any
+    /// relevant uses/defines as needed.
+    virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+                                  CCValAssign &VA) = 0;
+
+    /// The specified value has been assigned to a stack
+    /// location. Load or store it there, with appropriate extension
+    /// if necessary.
+    virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+                                      uint64_t Size, MachinePointerInfo &MPO,
+                                      CCValAssign &VA) = 0;
+
+    /// Handle custom values, which may be passed into one or more of \p VAs.
+    /// \return The number of \p VAs that have been assigned after the first
+    ///         one, and which should therefore be skipped from further
+    ///         processing.
+    virtual unsigned assignCustomValue(const ArgInfo &Arg,
+                                       ArrayRef<CCValAssign> VAs) {
+      // This is not a pure virtual method because not all targets need to worry
+      // about custom values.
+      llvm_unreachable("Custom values not supported");
+    }
+
+    unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+
+    virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                           CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
+                           CCState &State) {
+      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+    }
+
+    MachineIRBuilder &MIRBuilder;
+    MachineRegisterInfo &MRI;
+    CCAssignFn *AssignFn;
+  };
+
+protected:
+  /// Getter for generic TargetLowering class.
+  const TargetLowering *getTLI() const {
+    return TLI;
+  }
+
+  /// Getter for target specific TargetLowering class.
+  template <class XXXTargetLowering>
+  const XXXTargetLowering *getTLI() const {
+    return static_cast<const XXXTargetLowering *>(TLI);
+  }
+
+  template <typename FuncInfoTy>
+  void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+                   const FuncInfoTy &FuncInfo) const;
+
+  /// Invoke Handler::assignArg on each of the given \p Args and then use
+  /// \p Callback to move them to the assigned locations.
+  ///
+  /// \return True if everything has succeeded, false otherwise.
+  bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
+                         ValueHandler &Callback) const;
+
+public:
+  CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
+  virtual ~CallLowering() = default;
+
+  /// This hook must be implemented to lower outgoing return values, described
+  /// by \p Val, into the specified virtual register \p VReg.
+  /// This hook is used by GlobalISel.
+  ///
+  /// \return True if the lowering succeeds, false otherwise.
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
+                           const Value *Val, unsigned VReg) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the incoming (formal)
+  /// arguments, described by \p Args, for GlobalISel. Each argument
+  /// must end up in the related virtual register described by VRegs.
+  /// In other words, the first argument should end up in VRegs[0],
+  /// the second in VRegs[1], and so on.
+  /// \p MIRBuilder is set to the proper insertion for the argument
+  /// lowering.
+  ///
+  /// \return True if the lowering succeeded, false otherwise.
+  virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+                                    const Function &F,
+                                    ArrayRef<unsigned> VRegs) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the given call instruction,
+  /// including argument and return value marshalling.
+  ///
+  /// \p CallConv is the calling convention to be used for the call.
+  ///
+  /// \p Callee is the destination of the call. It should be either a register,
+  /// globaladdress, or externalsymbol.
+  ///
+  /// \p OrigRet describes the return value: the type returned by the function
+  /// and the generic virtual register the returned value should be lowered
+  /// into.
+  ///
+  /// \p OrigArgs is a list of descriptors, one per argument that needs to be
+  /// passed, giving each argument's virtual register and type; these are used
+  /// by the target to decide which register/stack slot should be allocated.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                         const MachineOperand &Callee, const ArgInfo &OrigRet,
+                         ArrayRef<ArgInfo> OrigArgs) const {
+    return false;
+  }
+
+  /// Lower the given call instruction, including argument and return value
+  /// marshalling.
+  ///
+  /// \p CS is the call/invoke instruction.
+  ///
+  /// \p ResReg is a register where the call's return value should be stored (or
+  /// 0 if there is no return value).
+  ///
+  /// \p ArgRegs is a list of virtual registers containing each argument that
+  /// needs to be passed.
+  ///
+  /// \p GetCalleeReg is a callback to materialize a register for the callee
+  /// if the target determines it cannot jump to the destination based purely
+  /// on \p CS. This might be because \p CS is indirect, or because of the
+  /// limited range of an immediate jump.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+                 unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                 std::function<unsigned()> GetCalleeReg) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
new file mode 100644
index 0000000..36a33de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -0,0 +1,43 @@
+//===- llvm/CodeGen/GlobalISel/Combiner.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to drive combines. Combiner passes will need to
+/// set up a CombinerInfo and call combineMachineInstrs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineRegisterInfo;
+class CombinerInfo;
+class TargetPassConfig;
+class MachineFunction;
+
+class Combiner {
+public:
+  Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
+
+  bool combineMachineInstrs(MachineFunction &MF);
+
+protected:
+  CombinerInfo &CInfo;
+
+  MachineRegisterInfo *MRI = nullptr;
+  const TargetPassConfig *TPC;
+  MachineIRBuilder Builder;
+};
+
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
new file mode 100644
index 0000000..5d5b839
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -0,0 +1,44 @@
+//== llvm/CodeGen/GlobalISel/CombinerHelper.h -------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===--------------------------------------------------------------------===//
+//
+/// This contains common combine transformations that may be used in a combine
+/// pass, or by the target elsewhere.
+/// Targets can pick individual opcode transformations from the helper or use
+/// tryCombine which invokes all transformations. All of the transformations
+/// return true if the MachineInstruction changed and false otherwise.
+//
+//===--------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+
+namespace llvm {
+
+class MachineIRBuilder;
+class MachineRegisterInfo;
+class MachineInstr;
+
+class CombinerHelper {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+
+public:
+  CombinerHelper(MachineIRBuilder &B);
+
+  /// If \p MI is a COPY, try to combine it.
+  /// Returns true if MI changed.
+  bool tryCombineCopy(MachineInstr &MI);
+
+  /// Try to transform \p MI by using all of the above
+  /// combine functions. Returns true if changed.
+  bool tryCombine(MachineInstr &MI);
+};
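+
+/// An illustrative sketch (hypothetical subclass): a CombinerInfo::combine
+/// override can simply delegate to the helper:
+///
+///   bool MyCombinerInfo::combine(MachineInstr &MI,
+///                                MachineIRBuilder &B) const {
+///     CombinerHelper Helper(B);
+///     return Helper.tryCombine(MI);
+///   }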
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
new file mode 100644
index 0000000..1d24854
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -0,0 +1,48 @@
+//===- llvm/CodeGen/GlobalISel/CombinerInfo.h ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for targets to specify which operations are combined, how, and
+/// when.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+
+#include <cassert>
+namespace llvm {
+
+class LegalizerInfo;
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+// Contains information relevant to enabling/disabling various combines for a
+// pass.
+class CombinerInfo {
+public:
+  CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
+               LegalizerInfo *LInfo)
+      : IllegalOpsAllowed(AllowIllegalOps),
+        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo) {
+    assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
+           "Expecting legalizerInfo when illegalops not allowed");
+  }
+  virtual ~CombinerInfo() = default;
+  /// If \p IllegalOpsAllowed is false, the CombinerHelper will make use of
+  /// the legalizerInfo to check for legality before each transformation.
+  bool IllegalOpsAllowed; // TODO: Make use of this.
+
+  /// If \p LegalizeIllegalOps is true, the Combiner will also legalize the
+  /// illegal ops that are created.
+  bool LegalizeIllegalOps; // TODO: Make use of this.
+  const LegalizerInfo *LInfo;
+  virtual bool combine(MachineInstr &MI, MachineIRBuilder &B) const = 0;
+};
+} // namespace llvm
+
+#endif
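
A minimal sketch of the intended subclassing pattern, assuming the
CombinerHelper from the previous header; MyCombinerInfo is hypothetical. A
non-null LegalizerInfo satisfies the constructor assertion when illegal ops
are disallowed but must be legalized:

  class MyCombinerInfo : public CombinerInfo {
  public:
    MyCombinerInfo(LegalizerInfo *LI)
        : CombinerInfo(/*AllowIllegalOps*/ false,
                       /*ShouldLegalizeIllegal*/ true, LI) {}
    bool combine(MachineInstr &MI, MachineIRBuilder &B) const override {
      CombinerHelper Helper(B);
      return Helper.tryCombine(MI); // True iff MI changed.
    }
  };
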
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
new file mode 100644
index 0000000..167905d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -0,0 +1,69 @@
+//===- GISelWorkList.h - Worklist for GISel passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_GISEL_WORKLIST_H
+#define LLVM_GISEL_WORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+// Worklist which mostly works similarly to InstCombineWorkList, but on
+// MachineInstrs. The main difference from something like a SetVector is that
+// erasing an element doesn't move all elements over one place - instead it
+// just nulls out the element's slot in the vector.
+// FIXME: Does it make sense to factor out common code with the
+// InstCombineWorkList?
+template<unsigned N>
+class GISelWorkList {
+  SmallVector<MachineInstr*, N> Worklist;
+  DenseMap<MachineInstr*, unsigned> WorklistMap;
+
+public:
+  GISelWorkList() = default;
+
+  bool empty() const { return WorklistMap.empty(); }
+
+  unsigned size() const { return WorklistMap.size(); }
+
+  /// insert - Add the specified instruction to the worklist if it isn't
+  /// already in it.
+  void insert(MachineInstr *I) {
+    if (WorklistMap.try_emplace(I, Worklist.size()).second) {
+      Worklist.push_back(I);
+    }
+  }
+
+  /// remove - Remove \p I from the worklist if it exists.
+  void remove(MachineInstr *I) {
+    auto It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.
+    Worklist[It->second] = nullptr;
+
+    WorklistMap.erase(It);
+  }
+
+  MachineInstr *pop_back_val() {
+    MachineInstr *I;
+    do {
+      I = Worklist.pop_back_val();
+    } while (!I);
+    assert(I && "Pop back on empty worklist");
+    WorklistMap.erase(I);
+    return I;
+  }
+};
+
+} // end namespace llvm.
+
+#endif
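
A short usage sketch of the null-out semantics described in the class comment;
MI0 and MI1 stand for arbitrary MachineInstr pointers:

  GISelWorkList<8> WL;
  WL.insert(MI0);
  WL.insert(MI1);
  WL.remove(MI0);       // Nulls out MI0's slot; the vector is not compacted.
  while (!WL.empty()) { // empty() consults the map, not the vector.
    MachineInstr *MI = WL.pop_back_val(); // Skips nulled-out slots.
    // ... process MI ...
  }
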
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
new file mode 100644
index 0000000..7061c01
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -0,0 +1,444 @@
+//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the IRTranslator pass.
+/// This pass is responsible for translating LLVM IR into MachineInstrs.
+/// It uses target hooks to lower the ABI, but aside from that the code
+/// generated by the pass is generic. This is the default translator used for
+/// GlobalISel.
+///
+/// \todo Replace the comments with actual doxygen comments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Types.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/Intrinsics.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class CallLowering;
+class Constant;
+class DataLayout;
+class Instruction;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetPassConfig;
+class User;
+class Value;
+
+// Technically the pass should run on a hypothetical MachineModule,
+// since it should translate Global into some sort of MachineGlobal.
+// The MachineGlobal should ultimately just be a transfer of ownership of
+// the interesting bits that are relevant to represent a global value.
+// That being said, we could investigate what it would cost to just duplicate
+// the information from the LLVM IR.
+// The idea is that ultimately we would be able to free up the memory used
+// by the LLVM IR as soon as the translation is over.
+class IRTranslator : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// Interface used to lower everything related to calls.
+  const CallLowering *CLI;
+
+  /// Mapping of the values of the current LLVM IR function
+  /// to the related virtual registers.
+  ValueToVReg ValToVReg;
+
+  // N.b. it's not completely obvious that this will be sufficient for every
+  // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
+  // lives).
+  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
+
+  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
+  // BasicBlocks, MachinePreds retains a mapping from the edges arriving at the
+  // BasicBlock to the corresponding created MachineBasicBlocks. Some
+  // BasicBlocks that get translated to a single MachineBasicBlock may also end
+  // up in this map.
+  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
+  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
+
+  // List of stubbed PHI instructions, for values and basic blocks to be filled
+  // in once all MachineBasicBlocks have been created.
+  SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+
+  /// Record of what frame index has been allocated to specified allocas for
+  /// this function.
+  DenseMap<const AllocaInst *, int> FrameIndices;
+
+  /// \name Methods for translating from LLVM IR to MachineInstr.
+  /// \see ::translate for general information on the translate methods.
+  /// @{
+
+  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
+  /// Insert the newly translated instruction(s) right where the CurBuilder
+  /// is set.
+  ///
+  /// The general algorithm is:
+  /// 1. Look for a virtual register for each operand or
+  ///    create one.
+  /// 2. Update the ValToVReg accordingly.
+  /// 2.alt. For constant arguments, if they are compile time constants,
+  ///   produce an immediate in the right operand and do not touch
+  ///   ValToVReg. Actually we will go with a virtual register for each
+  ///   constant because it may be expensive to actually materialize the
+  ///   constant. Moreover, if the constant spans several instructions,
+  ///   CSE may not catch them.
+  ///   => Update ValToVReg and remember that we saw a constant in Constants.
+  ///   We will materialize all the constants in finalize.
+  /// Note: we would need to do something so that we can recognize such
+  ///       operands as constants.
+  /// 3. Create the generic instruction.
+  ///
+  /// \return true if the translation succeeded.
+  bool translate(const Instruction &Inst);
+
+  /// Materialize \p C into virtual-register \p Reg. The generic instructions
+  /// performing this materialization will be inserted into the entry block of
+  /// the function.
+  ///
+  /// \return true if the materialization succeeded.
+  bool translate(const Constant &C, unsigned Reg);
+
+  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
+  /// emitted.
+  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM load instruction into generic IR.
+  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM store instruction into generic IR.
+  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
+  bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
+                        unsigned Intrinsic);
+
+  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+
+  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
+                                  MachineIRBuilder &MIRBuilder);
+
+  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+                               MachineIRBuilder &MIRBuilder);
+
+  bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+
+  /// Translate call instruction.
+  /// \pre \p U is a call instruction.
+  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
+  /// given generic Opcode.
+  bool translateCast(unsigned Opcode, const User &U,
+                     MachineIRBuilder &MIRBuilder);
+
+  /// Translate a phi instruction.
+  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate a comparison (icmp or fcmp) instruction or constant.
+  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an integer compare instruction (or constant).
+  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Translate a floating-point compare instruction (or constant).
+  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Add remaining operands onto phis we've translated. Executed after all
+  /// MachineBasicBlocks for the function have been created.
+  void finishPendingPhis();
+
+  /// Translate \p Inst into a binary operation \p Opcode.
+  /// \pre \p U is a binary operation.
+  bool translateBinaryOp(unsigned Opcode, const User &U,
+                         MachineIRBuilder &MIRBuilder);
+
+  /// Translate branch (br) instruction.
+  /// \pre \p U is a branch instruction.
+  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate return (ret) instruction.
+  /// The target needs to implement CallLowering::lowerReturn for
+  /// this to succeed.
+  /// \pre \p U is a return instruction.
+  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
+  }
+  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
+  }
+  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
+  }
+  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
+  }
+  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
+  }
+  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
+  }
+
+  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
+  }
+  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
+  }
+  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
+  }
+  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
+  }
+  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
+  }
+  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
+  }
+  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
+  }
+  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
+  }
+  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
+  }
+  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
+  }
+  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
+  }
+  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
+  }
+  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
+  }
+  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+    return true;
+  }
+  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
+  }
+
+  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
+  }
+
+  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
+  }
+  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
+  }
+  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
+  }
+
+  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
+  }
+  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
+  }
+  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
+  }
+  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
+  }
+
+  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
+
+  // Stubs to keep the compiler happy while we implement the rest of the
+  // translation.
+  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+
+  /// @}
+
+  // Builder for machine instructions, a la IRBuilder.
+  // I.e., compared to the regular MIBuilder, this one also inserts the
+  // instruction in the current block, it can create blocks, etc.; basically a
+  // kind of IRBuilder, but for Machine IR.
+  MachineIRBuilder CurBuilder;
+
+  // Builder set to the entry block (just after ABI lowering instructions). Used
+  // as a convenient location for Constants.
+  MachineIRBuilder EntryBuilder;
+
+  // The MachineFunction currently being translated.
+  MachineFunction *MF;
+
+  /// MachineRegisterInfo used to create virtual registers.
+  MachineRegisterInfo *MRI = nullptr;
+
+  const DataLayout *DL;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+  // * Insert all the code needed to materialize the constants
+  // at the proper place. E.g., Entry block or dominator block
+  // of each constant depending on how fancy we want to be.
+  // * Clear the different maps.
+  void finalizeFunction();
+
+  /// Get the VReg that represents \p Val.
+  /// If no such VReg exists, it is created.
+  unsigned getOrCreateVReg(const Value &Val);
+
+  /// Get the frame index that represents \p AI.
+  /// If no such frame index exists, it is created.
+  int getOrCreateFrameIndex(const AllocaInst &AI);
+
+  /// Get the alignment of the given memory operation instruction. This will
+  /// either be the explicitly specified value or the ABI-required alignment for
+  /// the type being accessed (according to the Module's DataLayout).
+  unsigned getMemOpAlignment(const Instruction &I);
+
+  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
+  /// returned will be the head of the translated block (suitable for branch
+  /// destinations).
+  MachineBasicBlock &getMBB(const BasicBlock &BB);
+
+  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
+  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
+  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
+  /// represented simply by the IR-level CFG.
+  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
+
+  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
+  /// this is just the single MachineBasicBlock corresponding to the predecessor
+  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
+  /// preceding the original though (e.g. switch instructions).
+  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
+    auto RemappedEdge = MachinePreds.find(Edge);
+    if (RemappedEdge != MachinePreds.end())
+      return RemappedEdge->second;
+    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
+  }
+
+public:
+  // Ctor, nothing fancy.
+  IRTranslator();
+
+  StringRef getPassName() const override { return "IRTranslator"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  // Algo:
+  //   CallLowering = MF.subtarget.getCallLowering()
+  //   F = MF.getParent()
+  //   MIRBuilder.reset(MF)
+  //   getMBB(F.getEntryBB())
+  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
+  //   for each bb in F
+  //     getMBB(bb)
+  //     for each inst in bb
+  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
+  //         report_fatal_error("Don't know how to translate input");
+  //   finalize()
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
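
A simplified, hypothetical sketch of the translate() dispatch implied by the
per-opcode methods above (the real implementation generates the switch from
LLVM's Instruction.def rather than spelling out cases):

  bool IRTranslator::translate(const Instruction &Inst) {
    CurBuilder.setDebugLoc(Inst.getDebugLoc());
    switch (Inst.getOpcode()) {
    case Instruction::Add: return translateAdd(Inst, CurBuilder);
    case Instruction::Ret: return translateRet(Inst, CurBuilder);
    // ... one case per IR opcode ...
    default:
      return false; // Unknown instruction: translation fails.
    }
  }
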
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
new file mode 100644
index 0000000..01521c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -0,0 +1,53 @@
+//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for selecting (possibly generic) machine instructions to
+/// target-specific instructions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+/// This pass is responsible for selecting generic machine instructions to
+/// target-specific instructions.  It relies on the InstructionSelector provided
+/// by the target.
+/// Selection is done by examining blocks in post-order, and instructions in
+/// reverse order.
+///
+/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
+class InstructionSelect : public MachineFunctionPass {
+public:
+  static char ID;
+  StringRef getPassName() const override { return "InstructionSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Selected);
+  }
+
+  InstructionSelect();
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif
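
A sketch of the documented iteration order (post-order over blocks, reverse
order within a block), assuming an InstructionSelector *ISel obtained from the
subtarget and a CodeGenCoverage CoverageInfo; post_order and reverse come from
llvm/ADT/PostOrderIterator.h and llvm/ADT/STLExtras.h:

  for (MachineBasicBlock *MBB : post_order(&MF)) {
    // Simplified: the real pass iterates manually because select() may erase
    // or replace instructions, which would invalidate a range-for.
    for (MachineInstr &MI : reverse(*MBB))
      if (!ISel->select(MI, CoverageInfo))
        return false; // Selection failed.
  }
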
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
new file mode 100644
index 0000000..eacd135
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -0,0 +1,381 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelector.h ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <vector>
+
+namespace llvm {
+
+class APInt;
+class APFloat;
+class LLT;
+class MachineInstr;
+class MachineInstrBuilder;
+class MachineFunction;
+class MachineOperand;
+class MachineRegisterInfo;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Container class for CodeGen predicate results.
+/// This is convenient because std::bitset does not have a constructor
+/// with an initializer list of set bits.
+///
+/// Each InstructionSelector subclass should define a PredicateBitset class
+/// with:
+///   const unsigned MAX_SUBTARGET_PREDICATES = 192;
+///   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+/// and update the constant to suit the target. Tablegen provides a suitable
+/// definition for the predicates in use in <Target>GenGlobalISel.inc when
+/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
+template <std::size_t MaxPredicates>
+class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+  PredicateBitsetImpl() = default;
+
+  PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
+      : std::bitset<MaxPredicates>(B) {}
+
+  PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
+    for (auto I : Init)
+      std::bitset<MaxPredicates>::set(I);
+  }
+};
+
+enum {
+  /// Begin a try-block to attempt a match and jump to OnFail if it is
+  /// unsuccessful.
+  /// - OnFail - The MatchTable entry at which to resume if the match fails.
+  ///
+  /// FIXME: This ought to take an argument indicating the number of try-blocks
+  ///        to exit on failure. It's usually one but the last match attempt of
+  ///        a block will need more. The (implemented) alternative is to tack a
+  ///        GIM_Reject on the end of each try-block which is simpler but
+  ///        requires an extra opcode and iteration in the interpreter on each
+  ///        failed match.
+  GIM_Try,
+
+  /// Record the specified instruction
+  /// - NewInsnID - Instruction ID to define
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_RecordInsn,
+
+  /// Check the feature bits
+  /// - Expected features
+  GIM_CheckFeatures,
+
+  /// Check the opcode on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - Expected opcode
+  GIM_CheckOpcode,
+  /// Check the instruction has the right number of operands
+  /// - InsnID - Instruction ID
+  /// - Expected number of operands
+  GIM_CheckNumOperands,
+  /// Check an immediate predicate on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckI64ImmPredicate,
+  /// Check an immediate predicate on the specified instruction via an APInt.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPIntImmPredicate,
+  /// Check a floating point immediate predicate on the specified instruction.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPFloatImmPredicate,
+  /// Check a memory operation has the specified atomic ordering.
+  /// - InsnID - Instruction ID
+  /// - Ordering - The AtomicOrdering value
+  GIM_CheckAtomicOrdering,
+  GIM_CheckAtomicOrderingOrStrongerThan,
+  GIM_CheckAtomicOrderingWeakerThan,
+
+  /// Check the type for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected type
+  GIM_CheckType,
+  /// Check the type of a pointer to any address space.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - SizeInBits - The size of the pointer value in bits.
+  GIM_CheckPointerToAny,
+  /// Check the register bank for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected register bank (specified as a register class)
+  GIM_CheckRegBankForClass,
+  /// Check the operand matches a complex predicate
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - RendererID - The renderer to hold the result
+  /// - Complex predicate ID
+  GIM_CheckComplexPattern,
+  /// Check the operand is a specific integer
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckConstantInt,
+  /// Check the operand is a specific literal integer (i.e. MO.isImm() or
+  /// MO.isCImm() is true).
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckLiteralInt,
+  /// Check the operand is a specific intrinsic ID
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected Intrinsic ID
+  GIM_CheckIntrinsicID,
+  /// Check the specified operand is an MBB
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_CheckIsMBB,
+
+  /// Check if the specified operand is safe to fold into the current
+  /// instruction.
+  /// - InsnID - Instruction ID
+  GIM_CheckIsSafeToFold,
+
+  /// Check the specified operands are identical.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - OtherInsnID - Other instruction ID
+  /// - OtherOpIdx - Other operand index
+  GIM_CheckIsSameOperand,
+
+  /// Fail the current try-block, or completely fail to match if there is no
+  /// current try-block.
+  GIM_Reject,
+
+  //=== Renderers ===
+
+  /// Mutate an instruction
+  /// - NewInsnID - Instruction ID to define
+  /// - OldInsnID - Instruction ID to mutate
+  /// - NewOpcode - The new opcode to use
+  GIR_MutateOpcode,
+  /// Build a new instruction
+  /// - InsnID - Instruction ID to define
+  /// - Opcode - The new opcode to use
+  GIR_BuildMI,
+
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  GIR_Copy,
+  /// Copy an operand to the specified instruction or add a zero register if the
+  /// operand is a zero immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - ZeroReg - The zero register to use
+  GIR_CopyOrAddZeroReg,
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - SubRegIdx - The subregister to copy
+  GIR_CopySubReg,
+  /// Add an implicit register def to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitDef,
+  /// Add an implicit register use to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitUse,
+  /// Add a register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddRegister,
+  /// Add a temporary register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - TempRegID - The temporary register ID to add
+  GIR_AddTempRegister,
+  /// Add an immediate to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - Imm - The immediate to add
+  GIR_AddImm,
+  /// Render complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  GIR_ComplexRenderer,
+  /// Render sub-operands of complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  /// - RenderOpID - The suboperand to render.
+  GIR_ComplexSubOperandRenderer,
+  /// Render operands to the specified instruction using a custom function
+  /// - InsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to get the matched operand from
+  /// - RendererFnID - Custom renderer function to call
+  GIR_CustomRenderer,
+
+  /// Render a G_CONSTANT operand as a sign-extended immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// The operand index is implicitly 1.
+  GIR_CopyConstantAsSImm,
+
+  /// Constrain an instruction operand to a register class.
+  /// - InsnID - Instruction ID to modify
+  /// - OpIdx - Operand index
+  /// - RCEnum - Register class enumeration value
+  GIR_ConstrainOperandRC,
+  /// Constrain an instruction's operands according to the instruction
+  /// description.
+  /// - InsnID - Instruction ID to modify
+  GIR_ConstrainSelectedInstOperands,
+  /// Merge all memory operands into instruction.
+  /// - InsnID - Instruction ID to modify
+  /// - MergeInsnID... - One or more Instruction IDs to merge into the result.
+  /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
+  ///                                    merge.
+  GIR_MergeMemOperands,
+  /// Erase from parent.
+  /// - InsnID - Instruction ID to erase
+  GIR_EraseFromParent,
+  /// Create a new temporary register that's not constrained.
+  /// - TempRegID - The temporary register ID to initialize.
+  /// - Expected type
+  GIR_MakeTempReg,
+
+  /// A successful emission
+  GIR_Done,
+
+  /// Increment the rule coverage counter.
+  /// - RuleID - The ID of the rule that was covered.
+  GIR_Coverage,
+};
+
+enum {
+  /// Indicates the end of the variable-length MergeInsnID list in a
+  /// GIR_MergeMemOperands opcode.
+  GIU_MergeMemOperands_EndOfList = -1,
+};
+
+/// Provides the logic to select generic machine instructions.
+class InstructionSelector {
+public:
+  virtual ~InstructionSelector() = default;
+
+  /// Select the (possibly generic) instruction \p I to only use target-specific
+  /// opcodes. It is OK to insert multiple instructions, but they cannot be
+  /// generic pre-isel instructions.
+  ///
+  /// \returns whether selection succeeded.
+  /// \pre  I.getParent() && I.getParent()->getParent()
+  /// \post
+  ///   if returns true:
+  ///     for I in all mutated/inserted instructions:
+  ///       !isPreISelGenericOpcode(I.getOpcode())
+  virtual bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const = 0;
+
+protected:
+  using ComplexRendererFns =
+      Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+  using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+  using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+  struct MatcherState {
+    std::vector<ComplexRendererFns::value_type> Renderers;
+    RecordedMIVector MIs;
+    DenseMap<unsigned, unsigned> TempRegisters;
+
+    MatcherState(unsigned MaxRenderers);
+  };
+
+public:
+  template <class PredicateBitset, class ComplexMatcherMemFn,
+            class CustomRendererFn>
+  struct ISelInfoTy {
+    const LLT *TypeObjects;
+    const PredicateBitset *FeatureBitsets;
+    const ComplexMatcherMemFn *ComplexPredicates;
+    const CustomRendererFn *CustomRenderers;
+  };
+
+protected:
+  InstructionSelector();
+
+  /// Execute a given matcher table and return true if the match was successful
+  /// and false otherwise.
+  template <class TgtInstructionSelector, class PredicateBitset,
+            class ComplexMatcherMemFn, class CustomRendererFn>
+  bool executeMatchTable(
+      TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+      const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+          &ISelInfo,
+      const int64_t *MatchTable, const TargetInstrInfo &TII,
+      MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+      const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+      CodeGenCoverage &CoverageInfo) const;
+
+  virtual bool testImmPredicate_I64(unsigned, int64_t) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+
+  /// Constrain a register operand of an instruction \p I to a specified
+  /// register class. This could involve inserting COPYs before (for uses) or
+  /// after (for defs) and may replace the operand of \p I.
+  /// \returns whether operand regclass constraining succeeded.
+  bool constrainOperandRegToRegClass(MachineInstr &I, unsigned OpIdx,
+                                     const TargetRegisterClass &RC,
+                                     const TargetInstrInfo &TII,
+                                     const TargetRegisterInfo &TRI,
+                                     const RegisterBankInfo &RBI) const;
+
+  bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
+                         const MachineRegisterInfo &MRI) const;
+
+  /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+  /// right-hand side. GlobalISel's separation of pointer and integer types
+  /// means that we don't need to worry about G_OR with equivalent semantics.
+  bool isBaseWithConstantOffset(const MachineOperand &Root,
+                                const MachineRegisterInfo &MRI) const;
+
+  /// Return true if MI can obviously be folded into IntoMI.
+  /// MI and IntoMI do not need to be in the same basic block, but MI must
+  /// precede IntoMI.
+  bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
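
A sketch of the PredicateBitset convention described in the class comment,
using a deliberately small size; the subset test mirrors what GIM_CheckFeatures
performs against ISelInfo.FeatureBitsets:

  const unsigned MAX_SUBTARGET_PREDICATES = 8;
  using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;

  PredicateBitset Available = {0, 2, 5}; // Set bits via the init-list ctor.
  PredicateBitset Required = {0, 5};
  bool Ok = (Available & Required) == Required; // All required bits present.
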
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 0000000..f7593ba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,769 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+/// GlobalISel PatFrag Predicates
+enum {
+  GIPFP_I64_Invalid = 0,
+  GIPFP_APInt_Invalid = 0,
+  GIPFP_APFloat_Invalid = 0,
+};
+
+template <class TgtInstructionSelector, class PredicateBitset,
+          class ComplexMatcherMemFn, class CustomRendererFn>
+bool InstructionSelector::executeMatchTable(
+    TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+    const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+        &ISelInfo,
+    const int64_t *MatchTable, const TargetInstrInfo &TII,
+    MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+    const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+    CodeGenCoverage &CoverageInfo) const {
+  uint64_t CurrentIdx = 0;
+  SmallVector<uint64_t, 8> OnFailResumeAt;
+
+  enum RejectAction { RejectAndGiveUp, RejectAndResume };
+  auto handleReject = [&]() -> RejectAction {
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Rejected\n");
+    if (OnFailResumeAt.empty())
+      return RejectAndGiveUp;
+    CurrentIdx = OnFailResumeAt.back();
+    OnFailResumeAt.pop_back();
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+                           << OnFailResumeAt.size() << " try-blocks remain)\n");
+    return RejectAndResume;
+  };
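+  // Note: OnFailResumeAt is a stack of try-block resume points: each GIM_Try
+  // pushes one entry, a failed check pops the innermost entry and resumes
+  // there, and matching only gives up for good (RejectAndGiveUp) once the
+  // stack is empty, i.e. when the outermost try-block has failed.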
+
+  while (true) {
+    assert(CurrentIdx != ~0u && "Invalid MatchTable index");
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_Try: {
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": Begin try-block\n");
+      OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
+      break;
+    }
+
+    case GIM_RecordInsn: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+
+      // As an optimisation we require that MIs[0] is always the root. Refuse
+      // any attempt to modify it.
+      assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg()) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Not a register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+      if (TRI.isPhysicalRegister(MO.getReg())) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Is a physical register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+      if ((size_t)NewInsnID < State.MIs.size())
+        State.MIs[NewInsnID] = NewMI;
+      else {
+        assert((size_t)NewInsnID == State.MIs.size() &&
+               "Expected to store MIs in order");
+        State.MIs.push_back(NewMI);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+                             << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+                             << ")\n");
+      break;
+    }
+
+    case GIM_CheckFeatures: {
+      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckFeatures(ExpectedBitsetID="
+                             << ExpectedBitsetID << ")\n");
+      if ((AvailableFeatures & ISelInfo.FeatureBitsets[ExpectedBitsetID]) !=
+          ISelInfo.FeatureBitsets[ExpectedBitsetID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckOpcode: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+
+      unsigned Opcode = State.MIs[InsnID]->getOpcode();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+                             << "], ExpectedOpcode=" << Expected
+                             << ") // Got=" << Opcode << "\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (Opcode != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckNumOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+                             << InsnID << "], Expected=" << Expected << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (State.MIs[InsnID]->getNumOperands() != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckI64ImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckI64ImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
+      int64_t Value = 0;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getSExtValue();
+      else if (State.MIs[InsnID]->getOperand(1).isImm())
+        Value = State.MIs[InsnID]->getOperand(1).getImm();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_I64(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPIntImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
+      APInt Value;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_APInt(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPFloatImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+             "Expected G_FCONSTANT");
+      assert(State.MIs[InsnID]->getOperand(1).isFPImm() && "Expected FPImm operand");
+      assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
+      APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+      if (!testImmPredicate_APFloat(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAtomicOrdering: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (MMO->getOrdering() != Ordering)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingOrStrongerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingWeakerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isStrongerThan(Ordering, MMO->getOrdering()))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckType: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), TypeID=" << TypeID << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckPointerToAny: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SizeInBits = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), SizeInBits=" << SizeInBits << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      // iPTR must be looked up in the target.
+      if (SizeInBits == 0) {
+        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+        SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+      }
+
+      assert(SizeInBits != 0 && "Pointer size must be known");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        const LLT &Ty = MRI.getType(MO.getReg());
+        if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+    case GIM_CheckRegBankForClass: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), RCEnum=" << RCEnum << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+              RBI.getRegBank(MO.getReg(), MRI, TRI)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckComplexPattern: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+                             << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), ComplexPredicateID=" << ComplexPredicateID
+                             << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      // FIXME: Use std::invoke() when it's available.
+      ComplexRendererFns Renderer =
+          (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
+              State.MIs[InsnID]->getOperand(OpIdx));
+      if (Renderer.hasValue())
+        State.Renderers[RendererID] = Renderer.getValue();
+      else
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckConstantInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+        LLT Ty = MRI.getType(MO.getReg());
+        Value = SignExtend64(Value, Ty.getSizeInBits());
+
+        if (!isOperandImmEqual(MO, Value, MRI)) {
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+        }
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+
+    case GIM_CheckLiteralInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIntrinsicID: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckIsMBB: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx << "))\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIsSafeToFold: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
+                             << InsnID << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckIsSameOperand: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t OtherInsnID = MatchTable[CurrentIdx++];
+      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+                             << InsnID << "][" << OpIdx << "], MIs["
+                             << OtherInsnID << "][" << OtherOpIdx << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
+              State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_Reject:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_Reject");
+      if (handleReject() == RejectAndGiveUp)
+        return false;
+      break;
+
+    case GIR_MutateOpcode: {
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t NewOpcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
+                                              State.MIs[OldInsnID]);
+      OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << NewOpcode << ")\n");
+      break;
+    }
+
+    case GIR_BuildMI: {
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t Opcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+                                  State.MIs[0]->getDebugLoc(), TII.get(Opcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+                             << NewInsnID << "], " << Opcode << ")\n");
+      break;
+    }
+
+    case GIR_Copy: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+                          << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
+      break;
+    }
+
+    case GIR_CopyOrAddZeroReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t ZeroReg = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+      if (isOperandImmEqual(MO, 0, MRI))
+        OutMIs[NewInsnID].addReg(ZeroReg);
+      else
+        OutMIs[NewInsnID].add(MO);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << ZeroReg << ")\n");
+      break;
+    }
+
+    case GIR_CopySubReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SubRegIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+                               0, SubRegIdx);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << SubRegIdx << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitDef: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitUse: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(RegNum);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddTempRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
+                             << InsnID << "], TempRegisters[" << TempRegID
+                             << "], " << TempRegFlags << ")\n");
+      break;
+    }
+
+    case GIR_AddImm: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Imm = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addImm(Imm);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+                             << "], " << Imm << ")\n");
+      break;
+    }
+
+    case GIR_ComplexRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      for (const auto &RenderOpFn : State.Renderers[RendererID])
+        RenderOpFn(OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ")\n");
+      break;
+    }
+    case GIR_ComplexSubOperandRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t RenderOpID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ComplexSubOperandRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ", "
+                             << RenderOpID << ")\n");
+      break;
+    }
+
+    case GIR_CopyConstantAsSImm: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+        OutMIs[NewInsnID].addImm(
+            State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+      } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+        OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+      break;
+    }
+
+    case GIR_CustomRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t RendererFnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
+                             << InsnID << "], MIs[" << OldInsnID << "], "
+                             << RendererFnID << ")\n");
+      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
+                                                     *State.MIs[OldInsnID]);
+      break;
+    }
+    case GIR_ConstrainOperandRC: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
+                                    *TRI.getRegClass(RCEnum), TII, TRI, RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+                             << InsnID << "], " << OpIdx << ", " << RCEnum
+                             << ")\n");
+      break;
+    }
+
+    case GIR_ConstrainSelectedInstOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+                                       RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MergeMemOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+                             << InsnID << "]");
+      int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
+      while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
+             GIU_MergeMemOperands_EndOfList) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << ", MIs[" << MergeInsnID << "]");
+        for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
+          OutMIs[InsnID].addMemOperand(MMO);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
+      break;
+    }
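+    // Illustrative sketch of the encoding consumed above: the opcode is
+    // followed by an instruction ID and then a run of instruction IDs
+    // terminated by the sentinel GIU_MergeMemOperands_EndOfList. For example,
+    // the table fragment
+    // \code
+    //   GIR_MergeMemOperands, /*InsnID*/0, /*MergeInsnID*/1, /*MergeInsnID*/2,
+    //   GIU_MergeMemOperands_EndOfList,
+    // \endcode
+    // copies the memory operands of MIs[1] and MIs[2] onto OutMIs[0].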
+
+    case GIR_EraseFromParent: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(State.MIs[InsnID] &&
+             "Attempted to erase an undefined instruction");
+      State.MIs[InsnID]->eraseFromParent();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MakeTempReg: {
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+
+      State.TempRegisters[TempRegID] =
+          MRI.createGenericVirtualRegister(ISelInfo.TypeObjects[TypeID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+                             << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+      break;
+    }
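+    // Illustrative table fragment (not from a real rule): create an s32
+    // temporary register and append it to OutMIs[0] via GIR_AddTempRegister
+    // above. GILLT_s32 stands in for the TableGen-assigned s32 type index.
+    // \code
+    //   GIR_MakeTempReg, /*TempRegID*/0, /*TypeID*/GILLT_s32,
+    //   GIR_AddTempRegister, /*InsnID*/0, /*TempRegID*/0, /*TempRegFlags*/0,
+    // \endcode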
+
+    case GIR_Coverage: {
+      int64_t RuleID = MatchTable[CurrentIdx++];
+      CoverageInfo.setCovered(RuleID);
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Coverage(" << RuleID << ")");
+      break;
+    }
+
+    case GIR_Done:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_Done");
+      return true;
+
+    default:
+      llvm_unreachable("Unexpected command");
+    }
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
new file mode 100644
index 0000000..a1f564b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -0,0 +1,287 @@
+//===- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains some helper functions which try to clean up artifacts
+// such as G_TRUNCs/G_[ZSA]EXTs that were created during legalization to make
+// the types match. This file also contains some combines of merges that happen
+// at the end of legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+
+namespace llvm {
+class LegalizationArtifactCombiner {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+
+public:
+  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                               const LegalizerInfo &LI)
+      : Builder(B), MRI(MRI), LI(LI) {}
+
+  bool tryCombineAnyExt(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned SrcReg = DefMI->getOperand(1).getReg();
+      Builder.setInstr(MI);
+      // We get a copy/trunc/extend depending on the sizes
+      Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
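+  /// Try to fold zext(trunc x) into and(anyext/copy/trunc x, mask), e.g.
+  /// \code
+  ///   %1(s8)  = G_TRUNC %0(s32)
+  ///   %2(s32) = G_ZEXT %1(s8)
+  /// \endcode
+  /// becomes "%2 = G_AND %0, 255", where 255 is the all-ones mask of the
+  /// zext source width.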
+  bool tryCombineZExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_ZEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned ZExtSrc = MI.getOperand(1).getReg();
+      LLT ZExtSrcTy = MRI.getType(ZExtSrc);
+      APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
+      auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
+      unsigned TruncSrc = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+      Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
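+  /// Try to fold sext(trunc x) into ashr(shl(anyext/copy/trunc x, c), c),
+  /// where c is the size difference between the types, e.g.
+  /// \code
+  ///   %1(s8)  = G_TRUNC %0(s32)
+  ///   %2(s32) = G_SEXT %1(s8)
+  /// \endcode
+  /// becomes "%3 = G_SHL %0, 24" followed by "%2 = G_ASHR %3, 24".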
+  bool tryCombineSExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_SEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned SExtSrc = MI.getOperand(1).getReg();
+      LLT SExtSrcTy = MRI.getType(SExtSrc);
+      unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
+      auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
+      unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
+      auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
+                                       SrcCopyExtOrTrunc, SizeDiffMIB);
+      Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
+  /// Try to fold sb = EXTEND (G_IMPLICIT_DEF sa) -> sb = G_IMPLICIT_DEF
+  bool tryFoldImplicitDef(MachineInstr &MI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    unsigned Opcode = MI.getOpcode();
+    if (Opcode != TargetOpcode::G_ANYEXT && Opcode != TargetOpcode::G_ZEXT &&
+        Opcode != TargetOpcode::G_SEXT)
+      return false;
+
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+      Builder.setInstr(MI);
+      Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineMerges(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+      return false;
+
+    unsigned NumDefs = MI.getNumOperands() - 1;
+    unsigned SrcReg = MI.getOperand(NumDefs).getReg();
+    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+    if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+      return false;
+
+    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+    if (NumMergeRegs < NumDefs) {
+      if (NumDefs % NumMergeRegs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to UNMERGEs, for example
+      //   %1 = G_MERGE_VALUES %4, %5
+      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+      // to
+      //   %9, %10 = G_UNMERGE_VALUES %4
+      //   %11, %12 = G_UNMERGE_VALUES %5
+
+      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+        SmallVector<unsigned, 2> DstRegs;
+        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+             ++j, ++DefIdx)
+          DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+      }
+
+    } else if (NumMergeRegs > NumDefs) {
+      if (NumMergeRegs % NumDefs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to MERGEs
+      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
+      //   %7, %8 = G_UNMERGE_VALUES %6
+      // to
+      //   %7 = G_MERGE_VALUES %17, %18
+      //   %8 = G_MERGE_VALUES %19, %20
+
+      const unsigned NumRegs = NumMergeRegs / NumDefs;
+      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+        SmallVector<unsigned, 2> Regs;
+        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+             ++j, ++Idx)
+          Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+        Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+      }
+
+    } else {
+      // FIXME: is a COPY appropriate if the types mismatch? We know both
+      // registers are allocatable by now.
+      if (MRI.getType(MI.getOperand(0).getReg()) !=
+          MRI.getType(MergeI->getOperand(1).getReg()))
+        return false;
+
+      for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
+        MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
+                           MergeI->getOperand(Idx + 1).getReg());
+    }
+
+    markInstAndDefDead(MI, *MergeI, DeadInsts);
+    return true;
+  }
+
+  /// Try to combine away MI.
+  /// Returns true if it combined away the MI.
+  /// Adds instructions that are dead as a result of the combine
+  /// into DeadInsts, which can include MI.
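+  ///
+  /// A hedged usage sketch (simplified; a real driver would also requeue
+  /// users of the combined instructions):
+  /// \code
+  ///   SmallVector<MachineInstr *, 4> DeadInsts;
+  ///   if (ArtCombiner.tryCombineInstruction(MI, DeadInsts))
+  ///     for (MachineInstr *Dead : DeadInsts)
+  ///       Dead->eraseFromParent();
+  /// \endcode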
+  bool tryCombineInstruction(MachineInstr &MI,
+                             SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    switch (MI.getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::G_ANYEXT:
+      return tryCombineAnyExt(MI, DeadInsts);
+    case TargetOpcode::G_ZEXT:
+      return tryCombineZExt(MI, DeadInsts);
+    case TargetOpcode::G_SEXT:
+      return tryCombineSExt(MI, DeadInsts);
+    case TargetOpcode::G_UNMERGE_VALUES:
+      return tryCombineMerges(MI, DeadInsts);
+    case TargetOpcode::G_TRUNC: {
+      bool Changed = false;
+      for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
+        Changed |= tryCombineInstruction(Use, DeadInsts);
+      return Changed;
+    }
+    }
+  }
+
+private:
+  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+  /// dead due to MI being killed, then mark DefMI as dead too.
+  /// Some of the combines (e.g. extend(trunc)) try to walk through redundant
+  /// copies between the extend and the trunc, and this attempts to collect
+  /// those in-between copies if they're dead.
+  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    DeadInsts.push_back(&MI);
+
+    // Collect all the copy instructions that are made dead due to deleting
+    // this instruction. Collect all of them until the Trunc(DefMI).
+    // E.g.,
+    // %1(s1) = G_TRUNC %0(s32)
+    // %2(s1) = COPY %1(s1)
+    // %3(s1) = COPY %2(s1)
+    // %4(s32) = G_ANYEXT %3(s1)
+    // In this case, we would have replaced %4 with a copy of %0,
+    // and as a result, %3, %2, %1 are dead.
+    MachineInstr *PrevMI = &MI;
+    while (PrevMI != &DefMI) {
+      // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't
+      // really try to fold copies in between, so we can stop walking here.
+      if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
+        break;
+      unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
+      if (MRI.hasOneUse(PrevRegSrc)) {
+        if (TmpDef != &DefMI) {
+          assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
+                 "Expecting copy here");
+          DeadInsts.push_back(TmpDef);
+        }
+      } else
+        break;
+      PrevMI = TmpDef;
+    }
+    if ((PrevMI == &DefMI ||
+         DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
+        MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+      DeadInsts.push_back(&DefMI);
+  }
+
+  /// Checks if the target legalizer info has specified anything about the
+  /// instruction, or if it is unsupported.
+  bool isInstUnsupported(const LegalityQuery &Query) const {
+    using namespace LegalizeActions;
+    auto Step = LI.getAction(Query);
+    return Step.Action == Unsupported || Step.Action == NotFound;
+  }
+};
+
+} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
new file mode 100644
index 0000000..8284ab6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -0,0 +1,65 @@
+//== llvm/CodeGen/GlobalISel/Legalizer.h ---------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is designed
+/// to be callable from other passes that find themselves with an illegal
+/// instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+
+class Legalizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  // Ctor, nothing fancy.
+  Legalizer();
+
+  StringRef getPassName() const override { return "Legalizer"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Legalized);
+  }
+
+  bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
+                       const TargetInstrInfo &TII);
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
new file mode 100644
index 0000000..8bd8a9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -0,0 +1,115 @@
+//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is
+/// designed to be callable from other passes that find themselves with an
+/// illegal instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+// Forward declarations.
+class LegalizerInfo;
+class Legalizer;
+class MachineRegisterInfo;
+
+class LegalizerHelper {
+public:
+  enum LegalizeResult {
+    /// Instruction was already legal and no change was made to the
+    /// MachineFunction.
+    AlreadyLegal,
+
+    /// Instruction has been legalized and the MachineFunction changed.
+    Legalized,
+
+    /// Some kind of error has occurred and we could not legalize this
+    /// instruction.
+    UnableToLegalize,
+  };
+
+  LegalizerHelper(MachineFunction &MF);
+
+  /// Replace \p MI by a sequence of legal instructions that can implement the
+  /// same operation. Note that this means \p MI may be deleted, so any iterator
+  /// steps should be performed before calling this function. The helper must
+  /// have been constructed for the MachineFunction containing \p MI.
+  ///
+  /// Considered as an opaque blob, the legal code will use and define the same
+  /// registers as \p MI.
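+  ///
+  /// A hedged usage sketch:
+  /// \code
+  ///   LegalizerHelper Helper(MF);
+  ///   switch (Helper.legalizeInstrStep(MI)) {
+  ///   case LegalizerHelper::AlreadyLegal: /* nothing to do */ break;
+  ///   case LegalizerHelper::Legalized:    /* MI was rewritten */ break;
+  ///   case LegalizerHelper::UnableToLegalize:
+  ///     /* report a selection failure (error path elided) */ break;
+  ///   }
+  /// \endcode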
+  LegalizeResult legalizeInstrStep(MachineInstr &MI);
+
+  /// Legalize an instruction by emitting a runtime library call instead.
+  LegalizeResult libcall(MachineInstr &MI);
+
+  /// Legalize an instruction by reducing the width of the underlying scalar
+  /// type.
+  LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+  /// Legalize an instruction by performing the operation on a wider scalar
+  /// type (for example a 16-bit addition can be safely performed at 32-bit
+  /// precision, ignoring the unused bits).
+  LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+
+  /// Legalize an instruction by splitting it into simpler parts, hopefully
+  /// understood by the target.
+  LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+  /// Legalize a vector instruction by splitting into multiple components, each
+  /// acting on the same scalar type as the original but with fewer elements.
+  LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                     LLT NarrowTy);
+
+  /// Legalize a vector instruction by increasing the number of vector elements
+  /// involved and ignoring the added elements later.
+  LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                    LLT WideTy);
+
+  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
+  /// functions.
+  MachineIRBuilder MIRBuilder;
+
+  /// Expose LegalizerInfo so the clients can re-use.
+  const LegalizerInfo &getLegalizerInfo() const { return LI; }
+
+private:
+
+  /// Helper function to split a wide generic register into bitwise blocks with
+  /// the given Type (which implies the number of blocks needed). The generic
+  /// registers created are appended to Ops, starting at bit 0 of Reg.
+  void extractParts(unsigned Reg, LLT Ty, int NumParts,
+                    SmallVectorImpl<unsigned> &Ops);
+
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+};
+
+/// Helper function that creates the given libcall.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+              const CallLowering::ArgInfo &Result,
+              ArrayRef<CallLowering::ArgInfo> Args);
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
new file mode 100644
index 0000000..117c791
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -0,0 +1,920 @@
+//===- llvm/CodeGen/GlobalISel/LegalizerInfo.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for Targets to specify which operations they can successfully
+/// select and how the others should be expanded most efficiently.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+#include <cstdint>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> DisableGISelLegalityCheck;
+
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+
+namespace LegalizeActions {
+enum LegalizeAction : std::uint8_t {
+  /// The operation is expected to be selectable directly by the target, and
+  /// no transformation is necessary.
+  Legal,
+
+  /// The operation should be synthesized from multiple instructions acting on
+  /// a narrower scalar base-type. For example a 64-bit add might be
+  /// implemented in terms of 32-bit add-with-carry.
+  NarrowScalar,
+
+  /// The operation should be implemented in terms of a wider scalar
+  /// base-type. For example a <2 x s8> add could be implemented as a <2
+  /// x s32> add (ignoring the high bits).
+  WidenScalar,
+
+  /// The (vector) operation should be implemented by splitting it into
+  /// sub-vectors where the operation is legal. For example a <8 x s64> add
+  /// might be implemented as 4 separate <2 x s64> adds.
+  FewerElements,
+
+  /// The (vector) operation should be implemented by widening the input
+  /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
+  /// rarely legal, but you might perform an <8 x i8> and then only look at
+  /// the first two results.
+  MoreElements,
+
+  /// The operation itself must be expressed in terms of simpler actions on
+  /// this target. E.g. an SREM replaced by an SDIV, a multiply, and a subtract.
+  Lower,
+
+  /// The operation should be implemented as a call to some kind of runtime
+  /// support library. For example this usually happens on machines that don't
+  /// support floating-point operations natively.
+  Libcall,
+
+  /// The target wants to do something special with this combination of
+  /// operand and type. A callback will be issued when it is needed.
+  Custom,
+
+  /// This operation is completely unsupported on the target. A programming
+  /// error has occurred.
+  Unsupported,
+
+  /// Sentinel value for when no action was found in the specified table.
+  NotFound,
+
+  /// Fall back onto the old rules.
+  /// TODO: Remove this once we've migrated
+  UseLegacyRules,
+};
+} // end namespace LegalizeActions
+
+using LegalizeActions::LegalizeAction;
+
+/// Legalization is decided based on an instruction's opcode, which type slot
+/// we're considering, and what the existing type is. These aspects are gathered
+/// together for convenience in the InstrAspect class.
+struct InstrAspect {
+  unsigned Opcode;
+  unsigned Idx = 0;
+  LLT Type;
+
+  InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Type(Type) {}
+  InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
+      : Opcode(Opcode), Idx(Idx), Type(Type) {}
+
+  bool operator==(const InstrAspect &RHS) const {
+    return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
+  }
+};
+
+/// The LegalityQuery object bundles together all the information that's needed
+/// to decide whether a given operation is legal or not.
+/// For efficiency, it doesn't make a copy of Types, so care must be taken not
+/// to free the underlying storage before using the query.
+struct LegalityQuery {
+  unsigned Opcode;
+  ArrayRef<LLT> Types;
+
+  raw_ostream &print(raw_ostream &OS) const;
+};
+
+/// The result of a query. It either indicates a final answer of Legal or
+/// Unsupported or describes an action that must be taken to make an operation
+/// more legal.
+struct LegalizeActionStep {
+  /// The action to take or the final answer.
+  LegalizeAction Action;
+  /// If describing an action, the type index to change. Otherwise zero.
+  unsigned TypeIdx;
+  /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
+  LLT NewType;
+
+  LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
+                     const LLT &NewType)
+      : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}
+
+  bool operator==(const LegalizeActionStep &RHS) const {
+    return std::tie(Action, TypeIdx, NewType) ==
+        std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
+  }
+};
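+
+// A hedged example of a step: for an s16 G_ADD on a target that only has
+// s32 adds, apply() might return
+// \code
+//   LegalizeActionStep(LegalizeAction::WidenScalar, 0, LLT::scalar(32))
+// \endcode
+// i.e. "widen type index 0 to s32 and retry".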
+
+using LegalityPredicate = std::function<bool (const LegalityQuery &)>;
+using LegalizeMutation =
+    std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
+
+namespace LegalityPredicates {
+/// True iff P0 and P1 are true.
+LegalityPredicate all(LegalityPredicate P0, LegalityPredicate P1);
+/// True iff the given type index is one of the specified types.
+LegalityPredicate typeInSet(unsigned TypeIdx,
+                            std::initializer_list<LLT> TypesInit);
+/// True iff the given types for the given pair of type indexes are one of the
+/// specified type pairs.
+LegalityPredicate
+typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
+              std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the specified type index is a scalar.
+LegalityPredicate isScalar(unsigned TypeIdx);
+/// True iff the specified type index is a scalar that's narrower than the given
+/// size.
+LegalityPredicate narrowerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar that's wider than the given
+/// size.
+LegalityPredicate widerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar whose size is not a power of
+/// 2.
+LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+/// True iff the specified type index is a vector whose element count is not a
+/// power of 2.
+LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+} // end namespace LegalityPredicates
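+
+// A hedged sketch composing these helpers (inside a `using namespace
+// LegalityPredicates;` scope): match when type index 0 is a scalar narrower
+// than 32 bits.
+// \code
+//   LegalityPredicate P = all(isScalar(0), narrowerThan(0, 32));
+//   bool Matches = P({TargetOpcode::G_ADD, {LLT::scalar(16)}}); // true
+// \endcode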
+
+namespace LegalizeMutations {
+/// Select this specific type for the given type index.
+LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+/// Widen the type for the given type index to the next power of 2.
+LegalizeMutation widenScalarToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+/// Add more elements to the type for the given type index to the next power of
+/// 2.
+LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+} // end namespace LegalizeMutations
+
+/// A single rule in a legalizer info ruleset.
+/// The specified action is chosen when the predicate is true. Where appropriate
+/// for the action (e.g. for WidenScalar) the new type is selected using the
+/// given mutator.
+class LegalizeRule {
+  LegalityPredicate Predicate;
+  LegalizeAction Action;
+  LegalizeMutation Mutation;
+
+public:
+  LegalizeRule(LegalityPredicate Predicate, LegalizeAction Action,
+               LegalizeMutation Mutation = nullptr)
+      : Predicate(Predicate), Action(Action), Mutation(Mutation) {}
+
+  /// Test whether the LegalityQuery matches.
+  bool match(const LegalityQuery &Query) const {
+    return Predicate(Query);
+  }
+
+  LegalizeAction getAction() const { return Action; }
+
+  /// Determine the change to make.
+  std::pair<unsigned, LLT> determineMutation(const LegalityQuery &Query) const {
+    if (Mutation)
+      return Mutation(Query);
+    return std::make_pair(0, LLT{});
+  }
+};
+
+class LegalizeRuleSet {
+  /// When non-zero, the opcode we are an alias of
+  unsigned AliasOf;
+  /// If true, there is another opcode that aliases this one
+  bool IsAliasedByAnother;
+  SmallVector<LegalizeRule, 2> Rules;
+
+  void add(const LegalizeRule &Rule) {
+    assert(AliasOf == 0 &&
+           "RuleSet is aliased, change the representative opcode instead");
+    Rules.push_back(Rule);
+  }
+
+  static bool always(const LegalityQuery &) { return true; }
+
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action,
+                            LegalityPredicate Predicate) {
+    add({Predicate, Action});
+    return *this;
+  }
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
+                            LegalizeMutation Mutation) {
+    add({Predicate, Action, Mutation});
+    return *this;
+  }
+  /// Use the given action when type index 0 is any type in the given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typeInSet(0, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 are any type pair in the
+  /// given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionFor(LegalizeAction Action,
+            std::initializer_list<std::pair<LLT, LLT>> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typePairInSet(0, 1, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in the given list.
+  /// That is, the type pair is in the cartesian product of the list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
+                                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types), typeInSet(1, Types)));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in their
+  /// respective lists.
+  /// That is, the type pair is in the cartesian product of the lists.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionForCartesianProduct(LegalizeAction Action,
+                            std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types0), typeInSet(1, Types1)));
+  }
+
+public:
+  LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false), Rules() {}
+
+  bool isAliasedByAnother() { return IsAliasedByAnother; }
+  void setIsAliasedByAnother() { IsAliasedByAnother = true; }
+  void aliasTo(unsigned Opcode) {
+    assert((AliasOf == 0 || AliasOf == Opcode) &&
+           "Opcode is already aliased to another opcode");
+    assert(Rules.empty() && "Aliasing will discard rules");
+    AliasOf = Opcode;
+  }
+  unsigned getAlias() const { return AliasOf; }
+
+  /// The instruction is legal if predicate is true.
+  LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Legal, Predicate);
+  }
+  /// The instruction is legal when type index 0 is any type in the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are any type pair in
+  /// the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in the given
+  /// list. That is, the type pair is in the cartesian product of the list.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in their
+  /// respective lists.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
+  }
+
+  /// Like legalIf, but for the Libcall action.
+  LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Libcall, Predicate);
+  }
+  LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types0,
+                             std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types0, Types1);
+  }
+
+  /// Widen the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
+                                 LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
+  }
+  /// Narrow the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
+  }
+
+  /// Add more elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
+  }
+  /// Remove elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
+                                   LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
+  }
+
+  /// The instruction is unsupported.
+  LegalizeRuleSet &unsupported() {
+    return actionIf(LegalizeAction::Unsupported, always);
+  }
+  LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Unsupported, Predicate);
+  }
+
+  LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Custom, Predicate);
+  }
+  LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &
+  customForCartesianProduct(std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
+  }
+
+  /// Widen the scalar to the next power of two that is at least MinSize.
+  /// No effect if the type is not a scalar or its size is already a power
+  /// of two.
+  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx,
+                                         unsigned MinSize = 0) {
+    using namespace LegalityPredicates;
+    return widenScalarIf(
+        sizeNotPow2(TypeIdx),
+        LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
+  }
+
+  LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
+    using namespace LegalityPredicates;
+    return narrowScalarIf(isScalar(TypeIdx), Mutation);
+  }
+
+  /// Ensure the scalar is at least as wide as Ty.
+  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return widenScalarIf(narrowerThan(TypeIdx, Ty.getSizeInBits()),
+                         changeTo(TypeIdx, Ty));
+  }
+
+  /// Ensure the scalar is at most as wide as Ty.
+  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(widerThan(TypeIdx, Ty.getSizeInBits()),
+                          changeTo(TypeIdx, Ty));
+  }
+
+  /// Conditionally limit the maximum size of the scalar.
+  /// For example, when the maximum size of one type depends on the size of
+  /// another such as extracting N bits from an M bit container.
+  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
+                               const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(
+        [=](const LegalityQuery &Query) {
+          // Note: widerThan() returns a predicate; it must be applied to the
+          // query rather than tested for non-emptiness.
+          return widerThan(TypeIdx, Ty.getSizeInBits())(Query) &&
+                 Predicate(Query);
+        },
+        changeTo(TypeIdx, Ty));
+  }
+
+  /// Limit the range of scalar sizes to MinTy and MaxTy.
+  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy,
+                               const LLT &MaxTy) {
+    assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
+
+    return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
+  }
+
+  /// Add more elements to the vector to reach the next power of two.
+  /// No effect if the type is not a vector or the element count is a power of
+  /// two.
+  LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
+    using namespace LegalityPredicates;
+    return moreElementsIf(numElementsNotPow2(TypeIdx),
+                          LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+  }
+
+  /// Ensure the number of elements in EltTy vectors is at least MinElements.
+  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MinElements) {
+    return moreElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() < MinElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MinElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements in EltTy vectors to at most MaxElements.
+  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MaxElements) {
+    return fewerElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() > MaxElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MaxElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements for the given vectors to at least MinTy's
+  /// number of elements and at most MaxTy's number of elements.
+  ///
+  /// No effect if the type is not a vector or does not have the same element
+  /// type as the constraints.
+  /// The element type of MinTy and MaxTy must match.
+  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT &MinTy,
+                                    const LLT &MaxTy) {
+    assert(MinTy.getElementType() == MaxTy.getElementType() &&
+           "Expected element types to agree");
+
+    const LLT &EltTy = MinTy.getElementType();
+    return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
+        .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
+  }
+
+  /// Fall back on the previous implementation. This should only be used while
+  /// porting a rule.
+  LegalizeRuleSet &fallback() {
+    add({always, LegalizeAction::UseLegacyRules});
+    return *this;
+  }
+
+  /// Apply the ruleset to the given LegalityQuery.
+  LegalizeActionStep apply(const LegalityQuery &Query) const;
+};
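+
+// A hedged sketch of target code built on this API (the rules are
+// illustrative, and it assumes the getActionDefinitionsBuilder() accessor
+// provided further down this header):
+// \code
+//   const LLT s32 = LLT::scalar(32);
+//   const LLT s64 = LLT::scalar(64);
+//   getActionDefinitionsBuilder(TargetOpcode::G_ADD)
+//       .legalFor({s32, s64})          // selectable as-is for these types
+//       .widenScalarToNextPow2(0)      // round odd widths up first
+//       .clampScalar(0, s32, s64);     // then clamp into the legal range
+// \endcode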
+
+class LegalizerInfo {
+public:
+  LegalizerInfo();
+  virtual ~LegalizerInfo() = default;
+
+  unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;
+  unsigned getActionDefinitionsIdx(unsigned Opcode) const;
+
+  /// Compute any ancillary tables needed to quickly decide how an operation
+  /// should be handled. This must be called after all "set*Action" methods,
+  /// but before any query is made or incorrect results may be returned.
+  void computeTables();
+
+  static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
+    using namespace LegalizeActions;
+    switch (Action) {
+    case NarrowScalar:
+    case WidenScalar:
+    case FewerElements:
+    case MoreElements:
+    case Unsupported:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  using SizeAndAction = std::pair<uint16_t, LegalizeAction>;
+  using SizeAndActionsVec = std::vector<SizeAndAction>;
+  using SizeChangeStrategy =
+      std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;
+
+  /// More friendly way to set an action for common types that have an LLT
+  /// representation.
+  /// The LegalizeAction must be one for which needsLegalizingToDifferentSize
+  /// returns false.
+  void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+    assert(!needsLegalizingToDifferentSize(Action));
+    TablesInitialized = false;
+    const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+    if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
+      SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
+    SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
+  }
+
+  /// The setAction calls record the non-size-changing legalization actions
+  /// to take on specifically-sized types. The SizeChangeStrategy defines what
+  /// to do when the size of the type needs to be changed to reach a legally
+  /// sized type (i.e., one that was defined through a setAction call).
+  /// e.g.
+  /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
+  /// setLegalizeScalarToDifferentSizeStrategy(
+  ///   G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+  /// will end up defining getAction({G_ADD, 0, T}) to return the following 
+  /// actions for different scalar types T:
+  ///  LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
+  ///  LLT::scalar(32):                 {Legal, 0, LLT::scalar(32)}
+  ///  LLT::scalar(33)..:               {NarrowScalar, 0, LLT::scalar(32)}
+  ///
+  /// If no SizeChangeStrategy gets defined through this function,
+  /// the default is unsupportedForDifferentSizes.
+  void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
+                                                const unsigned TypeIdx,
+                                                SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// See also setLegalizeScalarToDifferentSizeStrategy.
+  /// This function allows setting the SizeChangeStrategy for vector elements.
+  void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
+                                                       const unsigned TypeIdx,
+                                                       SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a 
+  /// particular operation consists of only supporting a specific set of type
+  /// sizes. E.g.
+  ///   setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
+  ///   setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
+  ///   setLegalizeScalarToDifferentSizeStrategy(
+  ///     G_DIV, 0, unsupportedForDifferentSizes);
+  /// will result in getAction({G_DIV, 0, T}) returning Legal for s32 and s64,
+  /// and Unsupported for all other scalar types T.
+  static SizeAndActionsVec
+  unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
+                                                     Unsupported);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular operation consists of widening the type to a larger legal
+  /// type, unless there is no such type, in which case the type is instead
+  /// narrowed to the largest legal type.
+  static SizeAndActionsVec
+  widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     NarrowScalar);
+  }
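+  // A sketch of the transformation (assuming the interval semantics
+  // illustrated in the setScalarAction example further down): given
+  //   v = {{32, Legal}, {64, Legal}}
+  // this strategy produces the full vector
+  //   {{1, WidenScalar}, {32, Legal}, {33, WidenScalar},
+  //    {64, Legal}, {65, NarrowScalar}}.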
+
+  static SizeAndActionsVec
+  widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       WidenScalar);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular vector operation consists of increasing the number of vector
+  /// elements until a legal type is reached, unless there is no such type, in
+  /// which case the vector is instead legalized towards the widest vector that
+  /// is still legal. E.g.
+  ///   setAction({G_ADD, LLT::vector(8, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(16, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(2, 32)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(4, 32)}, Legal);
+  ///   setLegalizeVectorElementToDifferentSizeStrategy(
+  ///     G_ADD, 0, moreToWiderTypesAndLessToWidest);
+  /// will result in the following getAction results:
+  ///   * getAction({G_ADD, LLT::vector(8,8)}) returns
+  ///       (Legal, vector(8,8)).
+  ///   * getAction({G_ADD, LLT::vector(9,8)}) returns
+  ///       (MoreElements, vector(16,8)).
+  ///   * getAction({G_ADD, LLT::vector(8,32)}) returns
+  ///       (FewerElements, vector(4,32)).
+  static SizeAndActionsVec
+  moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
+                                                     FewerElements);
+  }
+
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v,
+                                            LegalizeAction IncreaseAction,
+                                            LegalizeAction DecreaseAction);
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v,
+                                              LegalizeAction DecreaseAction,
+                                              LegalizeAction IncreaseAction);
+
+  /// Get the action definitions for the given opcode. Use this to run a
+  /// LegalityQuery through the definitions.
+  const LegalizeRuleSet &getActionDefinitions(unsigned Opcode) const;
+
+  /// Get the action definition builder for the given opcode. Use this to define
+  /// the action definitions.
+  ///
+  /// It is an error to request an opcode that has already been requested by the
+  /// multiple-opcode variant.
+  LegalizeRuleSet &getActionDefinitionsBuilder(unsigned Opcode);
+
+  /// Get the action definition builder for the given set of opcodes. Use this
+  /// to define the action definitions for multiple opcodes at once. The first
+  /// opcode given will be considered the representative opcode and will hold
+  /// the definitions whereas the other opcodes will be configured to refer to
+  /// the representative opcode. This lowers memory requirements and very
+  /// slightly improves performance.
+  ///
+  /// It would be very easy to introduce unexpected side-effects as a result of
+  /// this aliasing if it were permitted to request different but intersecting
+  /// sets of opcodes but that is difficult to keep track of. It is therefore an
+  /// error to request the same opcode twice using this API, to request an
+  /// opcode that already has definitions, or to use the single-opcode API on an
+  /// opcode that has already been requested by this API.
+  LegalizeRuleSet &
+  getActionDefinitionsBuilder(std::initializer_list<unsigned> Opcodes);
+  void aliasActionDefinitions(unsigned OpcodeTo, unsigned OpcodeFrom);
+
+  /// Determine what action should be taken to legalize the described
+  /// instruction. Requires computeTables to have been called.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const LegalityQuery &Query) const;
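+  // Example (sketch; assumes computeTables() was called and that the rules
+  // widen small G_ADD scalars to s32 on the hypothetical target):
+  //   LegalizeActionStep Step =
+  //       LI.getAction({TargetOpcode::G_ADD, {LLT::scalar(8)}});
+  //   // Step.Action == WidenScalar, Step.TypeIdx == 0,
+  //   // Step.NewType == LLT::scalar(32)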
+
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI) const;
+
+  bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+
+  virtual bool legalizeCustom(MachineInstr &MI,
+                              MachineRegisterInfo &MRI,
+                              MachineIRBuilder &MIRBuilder) const;
+
+private:
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction opcode, type-index and type. Requires computeTables to have
+  /// been called.
+  ///
+  /// \returns a pair consisting of the kind of legalization that should be
+  /// performed and the destination type.
+  std::pair<LegalizeAction, LLT>
+  getAspectAction(const InstrAspect &Aspect) const;
+
+  /// The SizeAndActionsVec is a representation mapping between all natural
+  /// numbers and an Action. The natural number represents the bit size of
+  /// the InstrAspect. For example, for a target with native support for 32-bit
+  /// and 64-bit additions, you'd express that as:
+  /// setScalarAction(G_ADD, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Legal},       // bit sizes [32, 33[
+  ///            {33, WidenScalar}, // bit sizes [33, 64[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, NarrowScalar} // bit sizes [65, +inf[
+  ///           });
+  /// It may be that only 64-bit pointers are supported on your target:
+  /// setPointerAction(G_GEP, 0, LLT::pointer(1),
+  ///           {{1, Unsupported},  // bit sizes [ 1, 64[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, Unsupported}, // bit sizes [65, +inf[
+  ///           });
+  void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
+                       const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+  void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
+                        const unsigned AddressSpace,
+                        const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
+        AddrSpace2PointerActions[OpcodeIdx].end())
+      AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+  /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+  /// (so that there's at least one legal vector with that scalar), then we
+  /// adjust the number of elements in the vector so that it is legal. The
+  /// desired action in the first step is controlled by this function.
+  void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
+                               const SizeAndActionsVec &SizeAndActions) {
+    unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        ScalarInVectorActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// See also setScalarInVectorAction.
+  /// This function lets you specify the number of elements in a vector that
+  /// are legal for a legal element size.
+  void setVectorNumElementAction(const unsigned Opcode,
+                                 const unsigned TypeIndex,
+                                 const unsigned ElementSize,
+                                 const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
+        NumElements2Actions[OpcodeIdx].end())
+      NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
+  /// i.e. it's OK if it doesn't start from size 1.
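+  /// e.g. (a sketch, using the notation above) {{8, Legal}, {9, NarrowScalar}}
+  /// is a valid partial vector, while {{9, NarrowScalar}} alone is not: there
+  /// is no smaller size that can be legalized towards.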
+  static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
+    using namespace LegalizeActions;
+#ifndef NDEBUG
+    // The sizes should be in increasing order
+    int prev_size = -1;
+    for(auto SizeAndAction: v) {
+      assert(SizeAndAction.first > prev_size);
+      prev_size = SizeAndAction.first;
+    }
+    // - for every Widen action, there should be a larger bitsize that
+    //   can be legalized towards (e.g. Legal, Lower, Libcall or Custom
+    //   action).
+    // - for every Narrow action, there should be a smaller bitsize that
+    //   can be legalized towards.
+    int SmallestNarrowIdx = -1;
+    int LargestWidenIdx = -1;
+    int SmallestLegalizableToSameSizeIdx = -1;
+    int LargestLegalizableToSameSizeIdx = -1;
+    for(size_t i=0; i<v.size(); ++i) {
+      switch (v[i].second) {
+        case FewerElements:
+        case NarrowScalar:
+          if (SmallestNarrowIdx == -1)
+            SmallestNarrowIdx = i;
+          break;
+        case WidenScalar:
+        case MoreElements:
+          LargestWidenIdx = i;
+          break;
+        case Unsupported:
+          break;
+        default:
+          if (SmallestLegalizableToSameSizeIdx == -1)
+            SmallestLegalizableToSameSizeIdx = i;
+          LargestLegalizableToSameSizeIdx = i;
+      }
+    }
+    if (SmallestNarrowIdx != -1) {
+      assert(SmallestLegalizableToSameSizeIdx != -1);
+      assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
+    }
+    if (LargestWidenIdx != -1)
+      assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
+#endif
+  }
+
+  /// A full SizeAndActionsVec must cover all bit sizes, i.e. must start
+  /// from size 1.
+  static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+    // Data structure invariant: The first bit size must be size 1.
+    assert(v.size() >= 1);
+    assert(v[0].first == 1);
+    checkPartialSizeAndActionsVector(v);
+#endif
+  }
+
+  /// Sets actions for all bit sizes on a particular generic opcode, type
+  /// index and scalar or pointer type.
+  void setActions(unsigned TypeIndex,
+                  SmallVector<SizeAndActionsVec, 1> &Actions,
+                  const SizeAndActionsVec &SizeAndActions) {
+    checkFullSizeAndActionsVector(SizeAndActions);
+    if (Actions.size() <= TypeIndex)
+      Actions.resize(TypeIndex + 1);
+    Actions[TypeIndex] = SizeAndActions;
+  }
+
+  static SizeAndAction findAction(const SizeAndActionsVec &Vec,
+                                  const uint32_t Size);
+
+  /// Returns the next action needed to get the scalar or pointer type closer
+  /// to being legal.
+  /// E.g. findLegalAction({G_REM, 13}) should return
+  /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will
+  /// probably be called, which should return (Lower, 32).
+  /// This is assuming the setScalarAction on G_REM was something like:
+  /// setScalarAction(G_REM, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Lower},       // bit sizes [32, 33[
+  ///            {33, NarrowScalar} // bit sizes [33, +inf[
+  ///           });
+  std::pair<LegalizeAction, LLT>
+  findScalarLegalAction(const InstrAspect &Aspect) const;
+
+  /// Returns the next action needed towards legalizing the vector type.
+  std::pair<LegalizeAction, LLT>
+  findVectorLegalAction(const InstrAspect &Aspect) const;
+
+  static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+  static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+
+  // Data structures used temporarily during construction of legality data:
+  using TypeMap = DenseMap<LLT, LegalizeAction>;
+  SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
+  bool TablesInitialized;
+
+  // Data structures used by getAction:
+  SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
+  SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      AddrSpace2PointerActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      NumElements2Actions[LastOp - FirstOp + 1];
+
+  LegalizeRuleSet RulesForOpcode[LastOp - FirstOp + 1];
+};
+
+#ifndef NDEBUG
+/// Checks that MIR is fully legal; returns an illegal instruction if one is
+/// found, nullptr otherwise.
+const MachineInstr *machineFunctionIsIllegal(const MachineFunction &MF);
+#endif
+
+} // end namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
new file mode 100644
index 0000000..0a46eb9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -0,0 +1,78 @@
+//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the Localizer pass.
+/// This pass moves/duplicates constant-like instructions close to their uses.
+/// Its primary goal is to work around the deficiencies of the fast register
+/// allocator.
+/// With GlobalISel, constants are all materialized in the entry block of
+/// a function. However, the fast allocator cannot rematerialize constants;
+/// it therefore has many more live ranges to deal with and will most likely
+/// end up spilling a lot.
+/// By pushing the constants close to their use, we only create small
+/// live-ranges.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+// Forward declarations.
+class MachineRegisterInfo;
+
+/// This pass implements the localization mechanism described at the
+/// top of this file. One specific property of the implementation is that
+/// it will materialize one and only one instance of a constant per
+/// basic block, thus enabling reuse of that constant within that block.
+/// Moreover, it only materializes constants in blocks where they
+/// are used. PHI uses are considered to happen at the end of the
+/// related predecessor block.
+class Localizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI;
+
+  /// Check whether or not \p MI needs to be moved close to its uses.
+  static bool shouldLocalize(const MachineInstr &MI);
+
+  /// Check if \p MOUse is used in the same basic block as \p Def.
+  /// If the use is in the same block, we say it is local.
+  /// When the use is not local, \p InsertMBB will contain the basic
+  /// block where to insert \p Def to have a local use.
+  static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+                         MachineBasicBlock *&InsertMBB);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  Localizer();
+
+  StringRef getPassName() const override { return "Localizer"; }
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
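+
+// Usage sketch (hedged; exact pipeline wiring is target-specific): with the
+// legacy pass manager, this pass is typically scheduled after RegBankSelect,
+// e.g.
+//   PM.add(new Localizer());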
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
new file mode 100644
index 0000000..797f5e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -0,0 +1,327 @@
+//== ----- llvm/CodeGen/GlobalISel/MIPatternMatch.h --------------------- == //
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Contains matchers for matching SSA Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_GMIR_PATTERNMATCH_H
+#define LLVM_GMIR_PATTERNMATCH_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+namespace MIPatternMatch {
+
+template <typename Reg, typename Pattern>
+bool mi_match(Reg R, MachineRegisterInfo &MRI, Pattern &&P) {
+  return P.match(MRI, R);
+}
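+
+// Example (sketch; Reg and MRI are assumed to come from the surrounding
+// pass):
+//   int64_t Cst;
+//   if (mi_match(Reg, MRI, m_ICst(Cst))) {
+//     // Reg is defined by a G_CONSTANT whose value is now in Cst.
+//   }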
+
+// TODO: Extend for N use.
+template <typename SubPatternT> struct OneUse_match {
+  SubPatternT SubPat;
+  OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
+  }
+};
+
+template <typename SubPat>
+inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
+  return SP;
+}
+
+struct ConstantMatch {
+  int64_t &CR;
+  ConstantMatch(int64_t &C) : CR(C) {}
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    if (auto MaybeCst = getConstantVRegVal(Reg, MRI)) {
+      CR = *MaybeCst;
+      return true;
+    }
+    return false;
+  }
+};
+
+inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+
+// TODO: Rework this for different kinds of MachineOperand.
+// Currently assumes the Src for a match is a register.
+// We might want to support taking in some MachineOperands and call getReg on
+// that.
+
+struct operand_type_match {
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) { return true; }
+  bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
+    return MO->isReg();
+  }
+};
+
+inline operand_type_match m_Reg() { return operand_type_match(); }
+
+/// Matching combinators.
+template <typename... Preds> struct And {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return true;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct And<Pred, Preds...> : And<Preds...> {
+  Pred P;
+  And(Pred &&p, Preds &&... preds)
+      : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
+  }
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) && And<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> struct Or {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return false;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct Or<Pred, Preds...> : Or<Preds...> {
+  Pred P;
+  Or(Pred &&p, Preds &&... preds)
+      : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
+  return And<Preds...>(std::forward<Preds>(preds)...);
+}
+
+template <typename... Preds> Or<Preds...> m_any_of(Preds &&... preds) {
+  return Or<Preds...>(std::forward<Preds>(preds)...);
+}
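+
+// Example (sketch): match a register defined by either a G_ADD or a G_SUB,
+// binding both source registers (m_GAdd/m_GSub are defined further down):
+//   unsigned Src0, Src1;
+//   bool Matched = mi_match(Reg, MRI,
+//       m_any_of(m_GAdd(m_Reg(Src0), m_Reg(Src1)),
+//                m_GSub(m_Reg(Src0), m_Reg(Src1))));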
+
+template <typename BindTy> struct bind_helper {
+  static bool bind(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
+    VR = V;
+    return true;
+  }
+};
+
+template <> struct bind_helper<MachineInstr *> {
+  static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
+                   unsigned Reg) {
+    MI = MRI.getVRegDef(Reg);
+    if (MI)
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<LLT> {
+  static bool bind(const MachineRegisterInfo &MRI, LLT &Ty, unsigned Reg) {
+    Ty = MRI.getType(Reg);
+    if (Ty.isValid())
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<const ConstantFP *> {
+  static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
+                   unsigned Reg) {
+    F = getConstantFPVRegVal(Reg, MRI);
+    if (F)
+      return true;
+    return false;
+  }
+};
+
+template <typename Class> struct bind_ty {
+  Class &VR;
+
+  bind_ty(Class &V) : VR(V) {}
+
+  template <typename ITy> bool match(const MachineRegisterInfo &MRI, ITy &&V) {
+    return bind_helper<Class>::bind(MRI, VR, V);
+  }
+};
+
+inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
+inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
+inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
+
+// Helper for matching G_FCONSTANT
+inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }
+
+// General helper for all the binary generic MI such as G_ADD/G_SUB etc
+template <typename LHS_P, typename RHS_P, unsigned Opcode,
+          bool Commutable = false>
+struct BinaryOp_match {
+  LHS_P L;
+  RHS_P R;
+
+  BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
+        return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                R.match(MRI, TmpMI->getOperand(2).getReg())) ||
+               (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                               L.match(MRI, TmpMI->getOperand(2).getReg())));
+      }
+    }
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>
+m_GAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
+                                                            const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>
+m_GMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>
+m_GFAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>
+m_GFMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
+m_GAnd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
+}
+
+// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
+template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
+  SrcTy L;
+
+  UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
+        return L.match(MRI, TmpMI->getOperand(1).getReg());
+      }
+    }
+    return false;
+  }
+};
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>
+m_GAnyExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_SEXT> m_GSExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_SEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT> m_GZExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT> m_GFPExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC> m_GTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>
+m_GBitcast(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>
+m_GPtrToInt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>
+m_GIntToPtr(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>
+m_GFPTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FABS> m_GFabs(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FABS>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
+}
+
+// Helper for checking if a Reg is of specific type.
+struct CheckType {
+  LLT Ty;
+  CheckType(const LLT &Ty) : Ty(Ty) {}
+
+  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.getType(Reg) == Ty;
+  }
+};
+
+inline CheckType m_SpecificType(LLT Ty) { return Ty; }
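+
+// Example (sketch): check that Reg is a G_TRUNC whose source is a 64-bit
+// scalar, binding that source register:
+//   unsigned Src;
+//   bool IsTruncFromS64 = mi_match(Reg, MRI,
+//       m_GTrunc(m_all_of(m_SpecificType(LLT::scalar(64)), m_Reg(Src))));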
+
+} // namespace MIPatternMatch
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
new file mode 100644
index 0000000..ef4e0ad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -0,0 +1,808 @@
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.h - MIBuilder --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the MachineIRBuilder class.
+/// This is a helper class to build MachineInstr.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/Types.h"
+
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+
+namespace llvm {
+
+// Forward declarations.
+class MachineFunction;
+class MachineInstr;
+class TargetInstrInfo;
+
+/// Helper class to build MachineInstr.
+/// It internally keeps the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilder {
+  /// MachineFunction under construction.
+  MachineFunction *MF;
+  /// Information used to access the description of the opcodes.
+  const TargetInstrInfo *TII;
+  /// Information used to verify types are consistent and to create virtual
+  /// registers.
+  MachineRegisterInfo *MRI;
+  /// Debug location to be set to any instruction we create.
+  DebugLoc DL;
+
+  /// \name Fields describing the insertion point.
+  /// @{
+  MachineBasicBlock *MBB;
+  MachineBasicBlock::iterator II;
+  /// @}
+
+  std::function<void(MachineInstr *)> InsertedInstr;
+
+  const TargetInstrInfo &getTII() {
+    assert(TII && "TargetInstrInfo is not set");
+    return *TII;
+  }
+
+  void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res,
+                                    unsigned Op0, unsigned Op1);
+
+  unsigned getDestFromArg(unsigned Reg) { return Reg; }
+  unsigned getDestFromArg(LLT Ty) {
+    return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+  }
+  unsigned getDestFromArg(const TargetRegisterClass *RC) {
+    return getMF().getRegInfo().createVirtualRegister(RC);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB, unsigned Reg) {
+    MIB.addUse(Reg);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB, const MachineInstrBuilder &UseMIB) {
+    MIB.addUse(UseMIB->getOperand(0).getReg());
+  }
+
+  void addUsesFromArgs(MachineInstrBuilder &MIB) { }
+  template<typename UseArgTy, typename ... UseArgsTy>
+  void addUsesFromArgs(MachineInstrBuilder &MIB, UseArgTy &&Arg1, UseArgsTy &&... Args) {
+    addUseFromArg(MIB, Arg1);
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+  }
+  unsigned getRegFromArg(unsigned Reg) { return Reg; }
+  unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
+    return MIB->getOperand(0).getReg();
+  }
+
+public:
+  /// Some constructors for easy use.
+  MachineIRBuilder() = default;
+  MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+  MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+    setInstr(MI);
+  }
+
+  /// Getter for the function we currently build.
+  MachineFunction &getMF() {
+    assert(MF && "MachineFunction is not set");
+    return *MF;
+  }
+
+  /// Getter for the basic block we currently build.
+  MachineBasicBlock &getMBB() {
+    assert(MBB && "MachineBasicBlock is not set");
+    return *MBB;
+  }
+
+  /// Current insertion point for new instructions.
+  MachineBasicBlock::iterator getInsertPt() {
+    return II;
+  }
+
+  /// Set the insertion point before the specified position.
+  /// \pre MBB must be in getMF().
+  /// \pre II must be a valid iterator in MBB.
+  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
+
+  /// \name Setters for the insertion point.
+  /// @{
+  /// Set the MachineFunction where to build instructions.
+  void setMF(MachineFunction &);
+
+  /// Set the insertion point to the end of \p MBB.
+  /// \pre \p MBB must be contained by getMF().
+  void setMBB(MachineBasicBlock &MBB);
+
+  /// Set the insertion point to before MI.
+  /// \pre MI must be in getMF().
+  void setInstr(MachineInstr &MI);
+  /// @}
+
+  /// \name Control where instructions we create are recorded (typically for
+  /// visiting again later during legalization).
+  /// @{
+  void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
+  void stopRecordingInsertions();
+  /// @}
+
+  /// Set the debug location to \p DL for all the next build instructions.
+  void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
+
+  /// Get the current instruction's debug location.
+  DebugLoc getDebugLoc() { return DL; }
+
+  /// Build and insert <empty> = \p Opcode <empty>.
+  /// The insertion point is the one set by the last call of either
+  /// setBasicBlock or setMI.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstr(unsigned Opcode);
+
+  /// DAG-like generic method for building arbitrary instructions as above.
+  /// \p Opc  Opcode for the instruction.
+  /// \p Ty   Either LLT/TargetRegisterClass/unsigned types for Dst.
+  /// \p Args Variadic list of uses of types (unsigned/MachineInstrBuilder).
+  /// Uses of type MachineInstrBuilder will perform
+  /// getOperand(0).getReg() to convert to register.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+                                 UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
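+  // Example (sketch; Builder is a configured MachineIRBuilder, S32 is
+  // LLT::scalar(32), and Op0Reg/Op1Reg are existing s32 vregs):
+  //   auto Add = Builder.buildInstr(TargetOpcode::G_ADD, S32, Op0Reg, Op1Reg);
+  //   unsigned Res = Add->getOperand(0).getReg();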
+
+  /// Build but don't insert <empty> = \p Opcode <empty>.
+  ///
+  /// \pre setMF, setBasicBlock or setMI  must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);
+
+  /// Insert an existing instruction at the insertion point.
+  MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
+  MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
+                                          const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
+  /// Expr).
+  MachineInstrBuilder buildIndirectDbgValue(unsigned Reg,
+                                            const MDNode *Variable,
+                                            const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in the stack slot specified by \p FI
+  /// (suitably modified by \p Expr).
+  MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable,
+                                      const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instructions specifying that \p Variable is
+  /// given by \p C (suitably modified by \p Expr).
+  MachineInstrBuilder buildConstDbgValue(const Constant &C,
+                                         const MDNode *Variable,
+                                         const MDNode *Expr);
+
+  /// Build and insert \p Res = G_FRAME_INDEX \p Idx
+  ///
+  /// G_FRAME_INDEX materializes the address of an alloca value or other
+  /// stack-based object.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
+
+  /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
+  ///
+  /// G_GLOBAL_VALUE materializes the address of the specified global
+  /// into \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type
+  ///      in the same address space as \p GV.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
+
+  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+  ///
+  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+  /// truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type).
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildAdd(Res, (getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+  ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type).
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildSub(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+  ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type).
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildMul(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_GEP \p Op0, \p Op1
+  ///
+  /// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p Op1 must be a generic virtual register with scalar type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
+  ///
+  /// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res. If \p Value is zero then no
+  /// G_GEP or G_CONSTANT will be created and \pre Op0 will be assigned to
+  /// \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Op0 must be a generic virtual register with pointer type.
+  /// \pre \p ValueTy must be a scalar type.
+  /// \pre \p Res must be 0. This is to detect confusion between
+  ///      materializeGEP() and buildGEP().
+  /// \post \p Res will either be a new generic virtual register of the same
+  ///       type as \p Op0 or \p Op0 itself.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+                                               const LLT &ValueTy,
+                                               uint64_t Value);
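+  // Example (sketch; PtrReg and Offset are assumed inputs):
+  //   unsigned Res = 0; // must be 0, see the precondition above
+  //   auto MaybeGEP =
+  //       Builder.materializeGEP(Res, PtrReg, LLT::scalar(64), Offset);
+  //   // Res now names either a fresh pointer vreg or PtrReg itself.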
+
+  /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
+  ///
+  /// G_PTR_MASK clears the low bits of a pointer operand without destroying its
+  /// pointer properties. This has the effect of rounding the address *down* to
+  /// a specified alignment in bits.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p NumBits must be an integer representing the number of low bits to
+  ///      be cleared in \p Op0.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
+                                   uint32_t NumBits);
+
+  /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
+  /// \p Op1, \p CarryIn
+  ///
+  /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
+  /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
+  /// arithmetic.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same scalar type.
+  /// \pre \p CarryOut and \p CarryIn must be generic virtual
+  ///      registers with the same scalar type (typically s1)
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
+                                 unsigned Op1, unsigned CarryIn);
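+  // Example (sketch): one step of a double-width addition, adding the low
+  // halves first and feeding the carry into the high halves (register names
+  // are assumed inputs):
+  //   Builder.buildUAdde(ResLo, Carry, Op0Lo, Op1Lo, ZeroCarryIn);
+  //   Builder.buildUAdde(ResHi, CarryOut, Op0Hi, Op1Hi, Carry);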
+
+  /// Build and insert \p Res = G_AND \p Op0, \p Op1
+  ///
+  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type).
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_OR \p Op0, \p Op1
+  ///
+  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type).
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op0
+  ///
+  /// G_ANYEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Op) * 8 set to \p Op. The remaining bits are unspecified
+  /// (i.e. this is neither zero nor sign-extension). For a vector register,
+  /// each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildAnyExt(DstType &&Res, ArgType &&Arg) {
+    return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+
+  /// Build and insert \p Res = G_SEXT \p Op
+  ///
+  /// G_SEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Op) * 8 set to \p Op. The remaining bits are duplicated from the
+  /// high bit of \p Op (i.e. 2s-complement sign extended).
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildSExt(DstType &&Res, ArgType &&Arg) {
+    return buildSExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op
+  ///
+  /// G_ZEXT produces a register of the specified width, with bits 0 to
+  /// sizeof(\p Op) * 8 set to \p Op. The remaining bits are 0. For a vector
+  /// register, each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildZExt(DstType &&Res, ArgType &&Arg) {
+    return buildZExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildSExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildSExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildZExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildZExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildAnyExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildAnyExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
+  /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
+  /// \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, unsigned Res,
+                                      unsigned Op);
+
+  /// Build and insert an appropriate cast between two registers of equal size.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildCast(DstType &&Res, ArgType &&Arg) {
+    return buildCast(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
+
+  /// Build and insert G_BR \p Dest
+  ///
+  /// G_BR is an unconditional branch to \p Dest.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBr(MachineBasicBlock &BB);
+
+  /// Build and insert G_BRCOND \p Tst, \p Dest
+  ///
+  /// G_BRCOND is a conditional branch to \p Dest.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tst must be a generic virtual register with scalar
+  ///      type. At the beginning of legalization, this will be a single
+  ///      bit (s1). Targets with interesting flags registers may change
+  ///      this. For a wider type, whether the branch is taken must only
+  ///      depend on bit 0 (for now).
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
+
+  /// Build and insert G_BRINDIRECT \p Tgt
+  ///
+  /// G_BRINDIRECT is an indirect branch to \p Tgt.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tgt must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBrIndirect(unsigned Tgt);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value. \p
+  /// Val will be extended or truncated to the size of \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or pointer
+  ///      type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
+    return buildConstant(getDestFromArg(Res), Val);
+  }
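+  // Example (sketch): materialize the 32-bit integer constant 42:
+  //   auto C = Builder.buildConstant(LLT::scalar(32), 42);
+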
+  /// Build and insert \p Res = G_FCONSTANT \p Val
+  ///
+  /// G_FCONSTANT is a floating-point constant with the specified size and
+  /// value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, const ConstantFP &Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, double Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, double Val);
+
+  /// Build and insert \p Res = COPY \p Op
+  ///
+  /// Register-to-register COPY sets \p Res to \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildCopy(DstType &&Res, SrcType &&Src) {
+    return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert `Res = G_LOAD Addr, MMO`.
+  ///
+  /// Loads the value stored at \p Addr. Puts the result in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
+                                MachineMemOperand &MMO);
+
+  /// Build and insert `G_STORE Val, Addr, MMO`.
+  ///
+  /// Stores the value \p Val to \p Addr.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Val must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
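+  ///
+  /// For example, the counterpart of the load sketch above:
+  /// \code
+  /// MIRBuilder.buildStore(ValReg, AddrReg, *MMO);
+  /// \endcode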
+  MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
+                                 MachineMemOperand &MMO);
+
+  /// Build and insert `Res = G_EXTRACT Src, Index`.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Src must be generic virtual registers.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
+
+  /// Build and insert \p Res = IMPLICIT_DEF.
+  template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
+    return buildUndef(getDestFromArg(Res));
+  }
+  MachineInstrBuilder buildUndef(unsigned Dst);
+
+  /// Build and insert instructions to put \p Ops together at the specified \p
+  /// Indices to form a larger register.
+  ///
+  /// If the types of the input registers are uniform and cover the entirety of
+  /// \p Res then a G_MERGE_VALUES will be produced. Otherwise, an IMPLICIT_DEF
+  /// followed by a sequence of G_INSERT instructions will be produced.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The final element of the sequence must not extend past the end of the
+  ///      destination register.
+  /// \pre The bits defined by each Op (derived from index and scalar size) must
+  ///      not overlap.
+  /// \pre \p Indices must be in ascending order of bit position.
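+  ///
+  /// For example, a sketch (assuming \c Res is s64 and \c Lo, \c Hi are s32):
+  /// \code
+  /// MIRBuilder.buildSequence(Res, {Lo, Hi}, {0, 32});
+  /// \endcode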
+  void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+                     ArrayRef<uint64_t> Indices);
+
+  /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
+  ///
+  /// G_MERGE_VALUES combines the input elements contiguously into a larger
+  /// register.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Res (and no more) must be covered by the input
+  ///      registers.
+  /// \pre The type of all \p Ops registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
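+  ///
+  /// For example, a sketch (assuming \c Res64 is s64 and \c Lo32, \c Hi32
+  /// are s32):
+  /// \code
+  /// MIRBuilder.buildMerge(Res64, {Lo32, Hi32});
+  /// \endcode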
+  MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
+
+  /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
+  ///
+  /// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
+  /// registers.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Op (and no more) must be covered by the
+  ///      output \p Res registers.
+  /// \pre The type of all \p Res registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
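+  ///
+  /// For example, the inverse of the merge sketch above:
+  /// \code
+  /// MIRBuilder.buildUnmerge({Lo32, Hi32}, Res64);
+  /// \endcode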
+  MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, unsigned Op);
+
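+  /// Build and insert `Res = G_INSERT Src, Op, Index`.
+  ///
+  /// Inserts the bits of \p Op into \p Src starting at bit \p Index and puts
+  /// the result in \p Res.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.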
+  MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
+                                  unsigned Op, unsigned Index);
+
+  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
+  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
+  /// result register definition unless \p Res is NoReg (== 0). The second
+  /// operand will be the intrinsic's ID.
+  ///
+  /// Callers are expected to add the required definitions and uses afterwards.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+                                     bool HasSideEffects);
+
+  /// Build and insert \p Res = G_FPTRUNC \p Op
+  ///
+  /// G_FPTRUNC converts a floating-point value into one with a smaller type.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildFPTrunc(DstType &&Res, SrcType &&Src) {
+    return buildFPTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+  MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_TRUNC \p Op
+  ///
+  /// G_TRUNC extracts the low bits of a type. For a vector type each element is
+  /// truncated independently before being packed into the destination.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
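+  ///
+  /// For example, a sketch (assuming \c Res32 is s32 and \c Src64 is s64):
+  /// \code
+  /// MIRBuilder.buildTrunc(Res32, Src64);
+  /// \endcode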
+  MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildTrunc(DstType &&Res, SrcType &&Src) {
+    return buildTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res. If \p Res is a scalar,
+  ///      \p Op0 must be either a scalar or pointer.
+  /// \pre \p Pred must be an integer predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
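+  ///
+  /// For example, a sketch (assuming \c ResS1 is an s1 register and \c LHS,
+  /// \c RHS have matching types):
+  /// \code
+  /// MIRBuilder.buildICmp(CmpInst::ICMP_EQ, ResS1, LHS, RHS);
+  /// \endcode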
+  MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res (or scalar, if \p Res is
+  ///      scalar).
+  /// \pre \p Pred must be a floating-point predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same type.
+  /// \pre \p Tst must be a generic virtual register with scalar, pointer or
+  ///      vector type. If vector then it must have the same number of
+  ///      elements as the other parameters.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
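+  ///
+  /// For example, a sketch (all registers assumed to be created elsewhere):
+  /// \code
+  /// MIRBuilder.buildSelect(Res, TstS1, TrueVal, FalseVal);
+  /// \endcode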
+  MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
+                                  unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
+  /// \p Elt, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Val must be generic virtual registers
+  ///      with the same vector type.
+  /// \pre \p Elt and \p Idx must be generic virtual registers
+  ///      with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
+                                               unsigned Elt, unsigned Idx);
+
+  /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  /// \pre \p Val must be a generic virtual register with vector type.
+  /// \pre \p Idx must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
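+  ///
+  /// For example, a sketch (assuming \c VecReg is a vector register and
+  /// \c ResScalar, \c IdxReg are scalars):
+  /// \code
+  /// MIRBuilder.buildExtractVectorElement(ResScalar, VecReg, IdxReg);
+  /// \endcode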
+  MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
+                                                unsigned Idx);
+
+  /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+  /// MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register of scalar type.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+  ///      registers of the same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
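+  ///
+  /// For example, a sketch (assuming \c MMO describes an atomic access):
+  /// \code
+  /// MIRBuilder.buildAtomicCmpXchg(OldVal, Addr, CmpVal, NewVal, *MMO);
+  /// \endcode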
+  MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
+                                         unsigned CmpVal, unsigned NewVal,
+                                         MachineMemOperand &MMO);
+};
+
+} // End namespace llvm.
+#endif // LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
new file mode 100644
index 0000000..c53ae41
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -0,0 +1,665 @@
+//=- llvm/CodeGen/GlobalISel/RegBankSelect.h - Reg Bank Selector --*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for assigning generic virtual registers to register banks.
+///
+/// By default, the reg bank selector relies on local decisions to
+/// assign the register bank. In other words, it looks at one instruction
+/// at a time to decide where the operand of that instruction should live.
+///
+/// At higher optimization levels, we could imagine that the reg bank selector
+/// would use more global analyses and do crazier things like duplicating
+/// instructions and so on. This is future work.
+///
+/// For now, the pass uses a greedy algorithm to decide where the operand
+/// of an instruction should live. It asks the target which banks may be
+/// used for each operand of the instruction and what is the cost. Then,
+/// it chooses the solution which minimizes the cost of the instruction plus
+/// the cost of any moves that may be needed to get the values into the right
+/// register bank.
+/// In other words, the cost for an instruction on a register bank RegBank
+/// is: Cost of I on RegBank plus the sum of the cost for bringing the
+/// input operands from their current register bank to RegBank.
+/// This gives the following formula:
+/// cost(I, RegBank) = cost(I.Opcode, RegBank) +
+///    sum(for each arg in I.arguments: costCrossCopy(arg.RegBank, RegBank))
+///
+/// E.g., let's say we are assigning the register bank for the instruction
+/// defining v2.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// v2 = G_ADD i32 v0, v1 <-- MI
+///
+/// The target may say it can generate G_ADD i32 on register banks A and B
+/// with costs of 5 and 1 respectively.
+/// Then, let's say the cost of a cross-register-bank copy from A to B is 1.
+/// The reg bank selector would compare the following two costs:
+/// cost(MI, A_REGBANK) = cost(G_ADD, A_REGBANK) + cost(v0.RegBank, A_REGBANK) +
+///    cost(v1.RegBank, A_REGBANK)
+///                     = 5 + cost(A_REGBANK, A_REGBANK) + cost(A_REGBANK,
+///                                                             A_REGBANK)
+///                     = 5 + 0 + 0 = 5
+/// cost(MI, B_REGBANK) = cost(G_ADD, B_REGBANK) + cost(v0.RegBank, B_REGBANK) +
+///    cost(v1.RegBank, B_REGBANK)
+///                     = 1 + cost(A_REGBANK, B_REGBANK) + cost(A_REGBANK,
+///                                                             B_REGBANK)
+///                     = 1 + 1 + 1 = 3
+/// Therefore, in this specific example, the reg bank selector would choose
+/// bank B for MI.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// tmp0(B_REGBANK) = COPY v0
+/// tmp1(B_REGBANK) = COPY v1
+/// v2(B_REGBANK) = G_ADD i32 tmp0, tmp1
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BlockFrequency;
+class MachineBlockFrequencyInfo;
+class MachineBranchProbabilityInfo;
+class MachineOperand;
+class MachineRegisterInfo;
+class Pass;
+class raw_ostream;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// This pass implements the reg bank selector pass used in the GlobalISel
+/// pipeline. At the end of this pass, all register operands have been assigned
+/// a register bank.
+class RegBankSelect : public MachineFunctionPass {
+public:
+  static char ID;
+
+  /// List of the modes supported by the RegBankSelect pass.
+  enum Mode {
+    /// Assign the register banks as fast as possible (default).
+    Fast,
+    /// Greedily minimize the cost of assigning register banks.
+    /// This should produce code of greater quality, but will
+    /// require more compile time.
+    Greedy
+  };
+
+  /// Abstract class used to represent an insertion point in a CFG.
+  /// This class records an insertion point and materializes it on
+  /// demand.
+  /// It allows us to reason about the frequency of this insertion point,
+  /// without having to logically materialize it (e.g., on an edge),
+  /// before we actually need to insert something.
+  class InsertPoint {
+  protected:
+    /// Tell if the insert point has already been materialized.
+    bool WasMaterialized = false;
+
+    /// Materialize the insertion point.
+    ///
+    /// If isSplit() is true, this involves actually splitting
+    /// the block or edge.
+    ///
+    /// \post getPointImpl() returns a valid iterator.
+    /// \post getInsertMBBImpl() returns a valid basic block.
+    /// \post isSplit() == false ; no more splitting should be required.
+    virtual void materialize() = 0;
+
+    /// Return the materialized insertion basic block.
+    /// Code will be inserted into that basic block.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock &getInsertMBBImpl() = 0;
+
+    /// Return the materialized insertion point.
+    /// Code will be inserted before that point.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock::iterator getPointImpl() = 0;
+
+  public:
+    virtual ~InsertPoint() = default;
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be; subsequent calls just return
+    /// the iterator to that point. I.e., no more splitting will
+    /// occur.
+    ///
+    /// \return The iterator that should be used with
+    /// MachineBasicBlock::insert. I.e., additional code happens
+    /// before that point.
+    MachineBasicBlock::iterator getPoint() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getPointImpl();
+    }
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be; subsequent calls just return
+    /// the basic block that contains the insertion point.
+    /// I.e., no more splitting will occur.
+    ///
+    /// \return The basic block that should be used with
+    /// MachineBasicBlock::insert and ::getPoint. The new code should
+    /// happen before that point.
+    MachineBasicBlock &getInsertMBB() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getInsertMBBImpl();
+    }
+
+    /// Insert \p MI just before ::getPoint().
+    MachineBasicBlock::iterator insert(MachineInstr &MI) {
+      return getInsertMBB().insert(getPoint(), &MI);
+    }
+
+    /// Does this point involve splitting an edge or block?
+    /// As soon as ::getPoint is called and thus, the point
+    /// materialized, the point will not require splitting anymore,
+    /// i.e., this will return false.
+    virtual bool isSplit() const { return false; }
+
+    /// Frequency of the insertion point.
+    /// \p P is used to access the various analyses that will help to
+    /// get that information, like MachineBlockFrequencyInfo.  If \p P
+    /// does not contain enough information to return the actual frequency,
+    /// this returns 1.
+    virtual uint64_t frequency(const Pass &P) const { return 1; }
+
+    /// Check whether this insertion point can be materialized.
+    /// Once ::getPoint has been called and the point thus materialized,
+    /// calling this method does not make sense.
+    virtual bool canMaterialize() const { return false; }
+  };
+
+  /// Insertion point before or after an instruction.
+  class InstrInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineInstr &Instr;
+
+    /// Whether the insertion point is before or after Instr.
+    bool Before;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      if (Before)
+        return Instr;
+      return Instr.getNextNode() ? *Instr.getNextNode()
+                                 : Instr.getParent()->end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override {
+      return *Instr.getParent();
+    }
+
+  public:
+    /// Create an insertion point before (\p Before=true) or after \p Instr.
+    InstrInsertPoint(MachineInstr &Instr, bool Before = true);
+
+    bool isSplit() const override;
+    uint64_t frequency(const Pass &P) const override;
+
+    // Worst case, we need to slice the basic block, but that is still doable.
+    bool canMaterialize() const override { return true; }
+  };
+
+  /// Insertion point at the beginning or end of a basic block.
+  class MBBInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineBasicBlock &MBB;
+
+    /// Whether the insertion point is at the beginning or end of MBB.
+    bool Beginning;
+
+    void materialize() override { /* Nothing to do to materialize. */ }
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      return Beginning ? MBB.begin() : MBB.end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return MBB; }
+
+  public:
+    MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
+        : InsertPoint(), MBB(MBB), Beginning(Beginning) {
+      // If we try to insert before phis, we should use the insertion
+      // points on the incoming edges.
+      assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
+             "Invalid beginning point");
+      // If we try to insert after the terminators, we should use the
+      // points on the outgoing edges.
+      assert((Beginning || MBB.getFirstTerminator() == MBB.end()) &&
+             "Invalid end point");
+    }
+
+    bool isSplit() const override { return false; }
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override { return true; }
+  };
+
+  /// Insertion point on an edge.
+  class EdgeInsertPoint : public InsertPoint {
+  private:
+    /// Source of the edge.
+    MachineBasicBlock &Src;
+
+    /// Destination of the edge.
+    /// After the materialization is done, this holds the basic block
+    /// that resulted from the splitting.
+    MachineBasicBlock *DstOrSplit;
+
+    /// P is used to update the analysis passes as applicable.
+    Pass &P;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      // DstOrSplit should be the Split block at this point.
+      // I.e., it should have one predecessor, Src, and one successor,
+      // the original Dst.
+      assert(DstOrSplit && DstOrSplit->isPredecessor(&Src) &&
+             DstOrSplit->pred_size() == 1 && DstOrSplit->succ_size() == 1 &&
+             "Did not split?!");
+      return DstOrSplit->begin();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return *DstOrSplit; }
+
+  public:
+    EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
+        : InsertPoint(), Src(Src), DstOrSplit(&Dst), P(P) {}
+
+    bool isSplit() const override {
+      return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
+    }
+
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override;
+  };
+
+  /// Struct used to represent the placement of a repairing point for
+  /// a given operand.
+  class RepairingPlacement {
+  public:
+    /// Define the kind of action this repairing needs.
+    enum RepairingKind {
+      /// Nothing to repair, just drop this action.
+      None,
+      /// Repairing code needs to happen before InsertPoints.
+      Insert,
+      /// (Re)assign the register bank of the operand.
+      Reassign,
+      /// Mark this repairing placement as impossible.
+      Impossible
+    };
+
+    /// \name Convenient types for a list of insertion points.
+    /// @{
+    using InsertionPoints = SmallVector<std::unique_ptr<InsertPoint>, 2>;
+    using insertpt_iterator = InsertionPoints::iterator;
+    using const_insertpt_iterator = InsertionPoints::const_iterator;
+    /// @}
+
+  private:
+    /// Kind of repairing.
+    RepairingKind Kind;
+    /// Index of the operand that will be repaired.
+    unsigned OpIdx;
+    /// Are all the insert points materializable?
+    bool CanMaterialize;
+    /// Do any of the insert points need splitting?
+    bool HasSplit = false;
+    /// Insertion point for the repair code.
+    /// The repairing code needs to happen just before these points.
+    InsertionPoints InsertPoints;
+    /// Some insertion points may need to update the liveness and such.
+    Pass &P;
+
+  public:
+    /// Create a repairing placement for the \p OpIdx-th operand of
+    /// \p MI. \p TRI is used to make some checks on the register aliases
+    /// if the machine operand is a physical register. \p P is used
+    /// to update liveness information and such when materializing the
+    /// points.
+    RepairingPlacement(MachineInstr &MI, unsigned OpIdx,
+                       const TargetRegisterInfo &TRI, Pass &P,
+                       RepairingKind Kind = RepairingKind::Insert);
+
+    /// \name Getters.
+    /// @{
+    RepairingKind getKind() const { return Kind; }
+    unsigned getOpIdx() const { return OpIdx; }
+    bool canMaterialize() const { return CanMaterialize; }
+    bool hasSplit() { return HasSplit; }
+    /// @}
+
+    /// \name Overloaded methods to add an insertion point.
+    /// @{
+    /// Add a MBBInsertionPoint to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
+    /// Add a InstrInsertionPoint to the list of InsertPoints.
+    void addInsertPoint(MachineInstr &MI, bool Before);
+    /// Add an EdgeInsertionPoint (\p Src, \p Dst) to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst);
+    /// Add an InsertPoint to the list of insert points.
+    /// This method takes the ownership of &\p Point.
+    void addInsertPoint(InsertPoint &Point);
+    /// @}
+
+    /// \name Accessors related to the insertion points.
+    /// @{
+    insertpt_iterator begin() { return InsertPoints.begin(); }
+    insertpt_iterator end() { return InsertPoints.end(); }
+
+    const_insertpt_iterator begin() const { return InsertPoints.begin(); }
+    const_insertpt_iterator end() const { return InsertPoints.end(); }
+
+    unsigned getNumInsertPoints() const { return InsertPoints.size(); }
+    /// @}
+
+    /// Change the type of this repairing placement to \p NewKind.
+    /// It is not possible to switch a repairing placement to the
+    /// RepairingKind::Insert. There is no fundamental problem with
+    /// that, but also no users, so we do not support it for now.
+    ///
+    /// \pre NewKind != RepairingKind::Insert
+    /// \post getKind() == NewKind
+    void switchTo(RepairingKind NewKind) {
+      assert(NewKind != Kind && "Already of the right Kind");
+      Kind = NewKind;
+      InsertPoints.clear();
+      CanMaterialize = NewKind != RepairingKind::Impossible;
+      HasSplit = false;
+      assert(NewKind != RepairingKind::Insert &&
+             "We would need more MI to switch to Insert");
+    }
+  };
+
+private:
+  /// Helper class used to represent the cost for mapping an instruction.
+  /// When mapping an instruction, we may introduce some repairing code.
+  /// In most cases, the repairing code is local to the instruction,
+  /// thus, we can omit the basic block frequency from the cost.
+  /// However, some alternatives may produce non-local cost, e.g., when
+  /// repairing a phi, and thus we then need to scale the local cost
+  /// to the non-local cost. This class does this for us.
+  /// \note: We could simply always scale the cost. The problem is that
+  /// we would then be more likely to saturate the cost and end
+  /// up with the same cost for actually different alternatives.
+  /// Another option would be to use APInt everywhere.
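+  ///
+  /// Conceptually, and only as a sketch (the actual comparison also has to
+  /// deal with saturation), two costs compare as if each were computed as:
+  /// \code
+  /// TotalCost = LocalCost * LocalFreq + NonLocalCost;
+  /// \endcode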
+  class MappingCost {
+  private:
+    /// Cost of the local instructions.
+    /// This cost is free of basic block frequency.
+    uint64_t LocalCost = 0;
+    /// Cost of the non-local instructions.
+    /// This cost should include the frequency of the related blocks.
+    uint64_t NonLocalCost = 0;
+    /// Frequency of the block where the local instructions live.
+    uint64_t LocalFreq;
+
+    MappingCost(uint64_t LocalCost, uint64_t NonLocalCost, uint64_t LocalFreq)
+        : LocalCost(LocalCost), NonLocalCost(NonLocalCost),
+          LocalFreq(LocalFreq) {}
+
+    /// Check if this cost is saturated.
+    bool isSaturated() const;
+
+  public:
+    /// Create a MappingCost assuming that most of the instructions
+    /// will occur in a basic block with \p LocalFreq frequency.
+    MappingCost(const BlockFrequency &LocalFreq);
+
+    /// Add \p Cost to the local cost.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addLocalCost(uint64_t Cost);
+
+    /// Add \p Cost to the non-local cost.
+    /// Non-local cost should reflect the frequency of their placement.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addNonLocalCost(uint64_t Cost);
+
+    /// Saturate the cost to the maximal representable value.
+    void saturate();
+
+    /// Return an instance of MappingCost that represents an
+    /// impossible mapping.
+    static MappingCost ImpossibleCost();
+
+    /// Check if this is less than \p Cost.
+    bool operator<(const MappingCost &Cost) const;
+    /// Check if this is equal to \p Cost.
+    bool operator==(const MappingCost &Cost) const;
+    /// Check if this is not equal to \p Cost.
+    bool operator!=(const MappingCost &Cost) const { return !(*this == Cost); }
+    /// Check if this is greater than \p Cost.
+    bool operator>(const MappingCost &Cost) const {
+      return *this != Cost && Cost < *this;
+    }
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Overload the stream operator for easy debug printing.
+    friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
+      Cost.print(OS);
+      return OS;
+    }
+  };
+
+  /// Interface to the target lowering info related
+  /// to register banks.
+  const RegisterBankInfo *RBI = nullptr;
+
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI = nullptr;
+
+  /// Information on the register classes for the current function.
+  const TargetRegisterInfo *TRI = nullptr;
+
+  /// Get the frequency of blocks.
+  /// This is required for non-fast mode.
+  MachineBlockFrequencyInfo *MBFI = nullptr;
+
+  /// Get the frequency of the edges.
+  /// This is required for non-fast mode.
+  MachineBranchProbabilityInfo *MBPI = nullptr;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<MachineOptimizationRemarkEmitter> MORE;
+
+  /// Helper class used for every code morphing.
+  MachineIRBuilder MIRBuilder;
+
+  /// Optimization mode of the pass.
+  Mode OptMode;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Assign the register bank of each operand of \p MI.
+  /// \return True on success, false otherwise.
+  bool assignInstr(MachineInstr &MI);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+  /// Check if \p Reg is already assigned what is described by \p ValMapping.
+  /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
+  /// register bank.  I.e., no repairing is necessary to have the
+  /// assignment match.
+  bool assignmentMatch(unsigned Reg,
+                       const RegisterBankInfo::ValueMapping &ValMapping,
+                       bool &OnlyAssign) const;
+
+  /// Insert repairing code for \p Reg as specified by \p ValMapping.
+  /// The repairing placement is specified by \p RepairPt.
+  /// \p NewVRegs contains all the registers required to remap \p Reg.
+  /// In other words, the number of registers in NewVRegs must be equal
+  /// to ValMapping.BreakDown.size().
+  ///
+  /// The transformation could be sketched as:
+  /// \code
+  /// ... = op Reg
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// <NewRegs> = COPY or extract Reg
+  /// ... = op Reg
+  /// \endcode
+  ///
+  /// and
+  /// \code
+  /// Reg = op ...
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// Reg = op ...
+  /// Reg = COPY or build_sequence <NewRegs>
+  /// \endcode
+  ///
+  /// \pre NewVRegs.size() == ValMapping.BreakDown.size()
+  ///
+  /// \note The caller is supposed to do the rewriting of op if need be.
+  /// I.e., Reg = op ... => <NewRegs> = NewOp ...
+  ///
+  /// \return True if the repairing worked, false otherwise.
+  bool repairReg(MachineOperand &MO,
+                 const RegisterBankInfo::ValueMapping &ValMapping,
+                 RegBankSelect::RepairingPlacement &RepairPt,
+                 const iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+                     &NewVRegs);
+
+  /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
+  /// The cost is free of basic block frequencies.
+  /// \pre MO.isReg()
+  /// \pre MO is assigned to a register bank.
+  /// \pre ValMapping is a valid mapping for MO.
+  uint64_t
+  getRepairCost(const MachineOperand &MO,
+                const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Find the best mapping for \p MI from \p PossibleMappings.
+  /// \return a reference on the best mapping in \p PossibleMappings.
+  const RegisterBankInfo::InstructionMapping &
+  findBestMapping(MachineInstr &MI,
+                  RegisterBankInfo::InstructionMappings &PossibleMappings,
+                  SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+  /// Compute the cost of mapping \p MI with \p InstrMapping and
+  /// compute the repairing placement for such mapping in \p
+  /// RepairPts.
+  /// \p BestCost is used to specify when the cost becomes too high
+  /// and thus it is not worth computing the RepairPts.  Moreover if
+  /// \p BestCost == nullptr, the mapping cost is actually not
+  /// computed.
+  MappingCost
+  computeMapping(MachineInstr &MI,
+                 const RegisterBankInfo::InstructionMapping &InstrMapping,
+                 SmallVectorImpl<RepairingPlacement> &RepairPts,
+                 const MappingCost *BestCost = nullptr);
+
+  /// When \p RepairPt involves splitting to repair \p MO for the
+  /// given \p ValMapping, try to change the way we repair such that
+  /// the splitting is not required anymore.
+  ///
+  /// \pre \p RepairPt.hasSplit()
+  /// \pre \p MO == MO.getParent()->getOperand(\p RepairPt.getOpIdx())
+  /// \pre \p ValMapping is the mapping of \p MO for MO.getParent()
+  ///      that implied \p RepairPt.
+  void tryAvoidingSplit(RegBankSelect::RepairingPlacement &RepairPt,
+                        const MachineOperand &MO,
+                        const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Apply \p Mapping to \p MI. \p RepairPts represents the different
+  /// repairing actions that need to happen for the mapping to be
+  /// applied.
+  /// \return True if the mapping was applied successfully, false otherwise.
+  bool applyMapping(MachineInstr &MI,
+                    const RegisterBankInfo::InstructionMapping &InstrMapping,
+                    SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+public:
+  /// Create a RegBankSelect pass with the specified \p RunningMode.
+  RegBankSelect(Mode RunningMode = Fast);
+
+  StringRef getPassName() const override { return "RegBankSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  /// Walk through \p MF and assign a register bank to every virtual register
+  /// that is still mapped to nothing.
+  /// The target needs to provide a RegisterBankInfo and in particular
+  /// override RegisterBankInfo::getInstrMapping.
+  ///
+  /// Simplified algorithm:
+  /// \code
+  ///   RBI = MF.subtarget.getRegBankInfo()
+  ///   MIRBuilder.setMF(MF)
+  ///   for each bb in MF
+  ///     for each inst in bb
+  ///       MIRBuilder.setInstr(inst)
+  ///       MappingCosts = RBI.getMapping(inst);
+  ///       Idx = findIdxOfMinCost(MappingCosts)
+  ///       CurRegBank = MappingCosts[Idx].RegBank
+  ///       MRI.setRegBank(inst.getOperand(0).getReg(), CurRegBank)
+  ///       for each argument in inst
+  ///         if (CurRegBank != argument.RegBank)
+  ///           ArgReg = argument.getReg()
+  ///           Tmp = MRI.createNewVirtual(MRI.getSize(ArgReg), CurRegBank)
+  ///           MIRBuilder.buildInstr(COPY, Tmp, ArgReg)
+  ///           inst.getOperand(argument.getOperandNo()).setReg(Tmp)
+  /// \endcode
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
new file mode 100644
index 0000000..5d75842
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -0,0 +1,99 @@
+//==-- llvm/CodeGen/GlobalISel/RegisterBank.h - Register Bank ----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+// Forward declarations.
+class RegisterBankInfo;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// This class implements the register bank concept.
+/// Two instances of RegisterBank must have different IDs.
+/// This property is enforced by the RegisterBankInfo class.
+class RegisterBank {
+private:
+  unsigned ID;
+  const char *Name;
+  unsigned Size;
+  BitVector ContainedRegClasses;
+
+  /// Sentinel value used to recognize a register bank that has not been
+  /// properly initialized yet.
+  static const unsigned InvalidID;
+
+  /// Only the RegisterBankInfo can initialize RegisterBank properly.
+  friend RegisterBankInfo;
+
+public:
+  RegisterBank(unsigned ID, const char *Name, unsigned Size,
+               const uint32_t *ContainedRegClasses, unsigned NumRegClasses);
+
+  /// Get the identifier of this register bank.
+  unsigned getID() const { return ID; }
+
+  /// Get a user friendly name of this register bank.
+  /// Should be used only for debugging purposes.
+  const char *getName() const { return Name; }
+
+  /// Get the maximal size in bits that fits in this register bank.
+  unsigned getSize() const { return Size; }
+
+  /// Check whether this instance is ready to be used.
+  bool isValid() const;
+
+  /// Check if this register bank is valid. In other words,
+  /// if it has been properly constructed.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+
+  /// Check whether this register bank covers \p RC.
+  /// In other words, check if this register bank fully covers
+  /// the registers that \p RC contains.
+  /// \pre isValid()
+  bool covers(const TargetRegisterClass &RC) const;
+
+  /// Check whether \p OtherRB is the same as this.
+  bool operator==(const RegisterBank &OtherRB) const;
+  bool operator!=(const RegisterBank &OtherRB) const {
+    return !this->operator==(OtherRB);
+  }
+
+  /// Dump the register mask on dbgs() stream.
+  /// The dump is verbose.
+  void dump(const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Print the register mask on OS.
+  /// If IsForDebug is false, then only the name of the register bank
+  /// is printed. Otherwise, all the fields are printed.
+  /// TRI is then used to print the name of the register classes that
+  /// this register bank covers.
+  void print(raw_ostream &OS, bool IsForDebug = false,
+             const TargetRegisterInfo *TRI = nullptr) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
+  RegBank.print(OS);
+  return OS;
+}
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
new file mode 100644
index 0000000..82fd7ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -0,0 +1,757 @@
+//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the register bank info.
+/// This API is responsible for handling the register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <initializer_list>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class RegisterBank;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Holds all the information related to register banks.
+class RegisterBankInfo {
+public:
+  /// Helper struct that represents how a value is partially mapped
+  /// into a register.
+  /// The StartIdx and Length represent what region of the original
+  /// value this partial mapping covers.
+  /// This can be represented as a mask of contiguous bits starting
+  /// at the StartIdx-th bit and spanning Length bits.
+  /// StartIdx is counted from the least significant bit.
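+  ///
+  /// For example, a sketch (GPRBank stands for some illustrative
+  /// RegisterBank): a 64-bit value split into two 32-bit halves could be
+  /// described by:
+  /// \code
+  /// PartialMapping LoHalf(/*StartIdx*/ 0, /*Length*/ 32, GPRBank);
+  /// PartialMapping HiHalf(/*StartIdx*/ 32, /*Length*/ 32, GPRBank);
+  /// \endcode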
+  struct PartialMapping {
+    /// Number of bits at which this partial mapping starts in the
+    /// original value.  The bits are counted from least significant
+    /// to most significant bits.
+    unsigned StartIdx;
+
+    /// Length of this mapping in bits. This is how many bits this
+    /// partial mapping covers in the original value:
+    /// from StartIdx to StartIdx + Length - 1.
+    unsigned Length;
+
+    /// Register bank where the partial value lives.
+    const RegisterBank *RegBank;
+
+    PartialMapping() = default;
+
+    /// Provide a shortcut for quickly building PartialMapping.
+    PartialMapping(unsigned StartIdx, unsigned Length,
+                   const RegisterBank &RegBank)
+        : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}
+
+    /// \return the index, in the original value, of the most
+    /// significant bit that this partial mapping covers.
+    unsigned getHighBitIdx() const { return StartIdx + Length - 1; }
+
+    /// Print this partial mapping on dbgs() stream.
+    void dump() const;
+
+    /// Print this partial mapping on \p OS.
+    void print(raw_ostream &OS) const;
+
+    /// Check that the Mask is compatible with the RegBank.
+    /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
+    /// there is no way this mapping is valid.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify() const;
+  };
+
+  /// Helper struct that represents how a value is mapped through
+  /// different register banks.
+  ///
+  /// \note: So far we do not have any users of the complex mappings
+  /// (mappings with more than one partial mapping), but when we do,
+  /// we would need to duplicate partial mappings.
+  /// The alternative could be to use an array of pointers of partial
+  /// mapping (i.e., PartialMapping **BreakDown) and duplicate the
+  /// pointers instead.
+  ///
+  /// E.g.,
+  /// Let's say we have a 32-bit add and a <2 x 32-bit> vadd. We
+  /// can expand the
+  /// <2 x 32-bit> vadd into 2 x 32-bit adds.
+  ///
+  /// Currently the TableGen-like file would look like:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // PartialMapping duplicated.
+  ///
+  /// ValueMapping[] {
+  ///   /*plain 32-bit add*/ {&PartialMapping[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// With the array of pointer, we would have:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // No more duplication.
+  ///
+  /// BreakDowns[] = {
+  /// /*AddBreakDown*/ &PartialMapping[0],
+  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
+  /// /*VAddBreakDown*/ &PartialMapping[1]
+  /// }; // Addresses of PartialMapping duplicated (smaller).
+  ///
+  /// ValueMapping[] {
+  ///   /*plain 32-bit add*/ {&BreakDowns[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// Given that a PartialMapping is actually small, the code size
+  /// impact is actually a degradation. Moreover the compile time will
+  /// be hit by the additional indirection.
+  /// If PartialMapping gets bigger we may reconsider.
+  struct ValueMapping {
+    /// How the value is broken down between the different register banks.
+    const PartialMapping *BreakDown;
+
+    /// Number of partial mappings used to break down this value.
+    unsigned NumBreakDowns;
+
+    /// The default constructor creates an invalid (isValid() == false)
+    /// instance.
+    ValueMapping() : ValueMapping(nullptr, 0) {}
+
+    /// Initialize a ValueMapping with the given parameter.
+    /// \p BreakDown needs to have a lifetime at least as long
+    /// as this instance.
+    ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
+        : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}
+
+    /// Iterators through the PartialMappings.
+    const PartialMapping *begin() const { return BreakDown; }
+    const PartialMapping *end() const { return BreakDown + NumBreakDowns; }
+
+    /// Check if this ValueMapping is valid.
+    bool isValid() const { return BreakDown && NumBreakDowns; }
+
+    /// Verify that this mapping makes sense for a value of
+    /// \p MeaningfulBitWidth.
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(unsigned MeaningfulBitWidth) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Helper class that represents how the value of an instruction may be
+  /// mapped and what is the related cost of such mapping.
+  class InstructionMapping {
+    /// Identifier of the mapping.
+    /// This is used to communicate between the target and the optimizers
+    /// which mapping should be realized.
+    unsigned ID = InvalidMappingID;
+
+    /// Cost of this mapping.
+    unsigned Cost = 0;
+
+    /// Mapping of all the operands.
+    const ValueMapping *OperandsMapping;
+
+    /// Number of operands.
+    unsigned NumOperands = 0;
+
+    const ValueMapping &getOperandMapping(unsigned i) {
+      assert(i < getNumOperands() && "Out of bound operand");
+      return OperandsMapping[i];
+    }
+
+  public:
+    /// Constructor for the mapping of an instruction.
+    /// \p NumOperands must be equal to number of all the operands of
+    /// the related instruction.
+    /// The rationale is that it is more efficient for the optimizers
+    /// to be able to assume that the mapping of the ith operand is
+    /// at the index i.
+    ///
+    /// \pre ID != InvalidMappingID
+    InstructionMapping(unsigned ID, unsigned Cost,
+                       const ValueMapping *OperandsMapping,
+                       unsigned NumOperands)
+        : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
+          NumOperands(NumOperands) {
+      assert(getID() != InvalidMappingID &&
+             "Use the default constructor for invalid mapping");
+    }
+
+    /// Default constructor.
+    /// Use this constructor to express that the mapping is invalid.
+    InstructionMapping() = default;
+
+    /// Get the cost.
+    unsigned getCost() const { return Cost; }
+
+    /// Get the ID.
+    unsigned getID() const { return ID; }
+
+    /// Get the number of operands.
+    unsigned getNumOperands() const { return NumOperands; }
+
+    /// Get the value mapping of the ith operand.
+    /// \pre The mapping for the ith operand has been set.
+    /// \pre The ith operand is a register.
+    const ValueMapping &getOperandMapping(unsigned i) const {
+      const ValueMapping &ValMapping =
+          const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+      return ValMapping;
+    }
+
+    /// Set the mapping for all the operands.
+    /// In other words, OpdsMapping should hold at least getNumOperands
+    /// ValueMappings.
+    void setOperandsMapping(const ValueMapping *OpdsMapping) {
+      OperandsMapping = OpdsMapping;
+    }
+
+    /// Check whether this object is valid.
+    /// This is a lightweight check for obviously wrong instances.
+    bool isValid() const {
+      return getID() != InvalidMappingID && OperandsMapping;
+    }
+
+    /// Verify that this mapping makes sense for \p MI.
+    /// \pre \p MI must be connected to a MachineFunction.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(const MachineInstr &MI) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Convenient type to represent the alternatives for mapping an
+  /// instruction.
+  /// \todo When we move to TableGen this should be an array ref.
+  using InstructionMappings = SmallVector<const InstructionMapping *, 4>;
+
+  /// Helper class used to get/create the virtual registers that will be used
+  /// to replace the MachineOperand when applying a mapping.
+  class OperandsMapper {
+    /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
+    /// OpIdx-th operand start. -1 means we do not have such a mapping yet.
+    /// Note: We use a SmallVector to avoid heap allocation for most cases.
+    SmallVector<int, 8> OpToNewVRegIdx;
+
+    /// Hold the registers that will be used to map MI with InstrMapping.
+    SmallVector<unsigned, 8> NewVRegs;
+
+    /// Current MachineRegisterInfo, used to create new virtual registers.
+    MachineRegisterInfo &MRI;
+
+    /// Instruction being remapped.
+    MachineInstr &MI;
+
+    /// New mapping of the instruction.
+    const InstructionMapping &InstrMapping;
+
+    /// Constant value identifying that the index in OpToNewVRegIdx
+    /// for an operand has not been set yet.
+    static const int DontKnowIdx;
+
+    /// Get the range in NewVRegs to store all the partial
+    /// values for the \p OpIdx-th operand.
+    ///
+    /// \return The iterator range for the space created.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    iterator_range<SmallVectorImpl<unsigned>::iterator>
+    getVRegsMem(unsigned OpIdx);
+
+    /// Get the end iterator for a range starting at \p StartIdx and
+    /// spanning \p NumVal elements in NewVRegs.
+    /// \pre StartIdx + NumVal <= NewVRegs.size()
+    SmallVectorImpl<unsigned>::const_iterator
+    getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
+    SmallVectorImpl<unsigned>::iterator getNewVRegsEnd(unsigned StartIdx,
+                                                       unsigned NumVal);
+
+  public:
+    /// Create an OperandsMapper that will hold the information to apply \p
+    /// InstrMapping to \p MI.
+    /// \pre InstrMapping.verify(MI)
+    OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
+                   MachineRegisterInfo &MRI);
+
+    /// \name Getters.
+    /// @{
+    /// The MachineInstr being remapped.
+    MachineInstr &getMI() const { return MI; }
+
+    /// The final mapping of the instruction.
+    const InstructionMapping &getInstrMapping() const { return InstrMapping; }
+
+    /// The MachineRegisterInfo we used to realize the mapping.
+    MachineRegisterInfo &getMRI() const { return MRI; }
+    /// @}
+
+    /// Create as many new virtual registers as needed for the mapping of the \p
+    /// OpIdx-th operand.
+    /// The number of registers is determined by the number of breakdowns for the
+    /// related operand in the instruction mapping.
+    /// The type of the new registers is a plain scalar of the right size.
+    /// The proper type is expected to be set when the mapping is applied to
+    /// the instruction(s) that realizes the mapping.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    ///
+    /// \post All the partial mappings of the \p OpIdx-th operand have been
+    /// assigned a new virtual register.
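+    ///
+    /// A typical sequence, as a sketch (\p MI, \p Mapping, and \p MRI
+    /// assumed to be available):
+    /// \code
+    /// RegisterBankInfo::OperandsMapper OpdMapper(MI, Mapping, MRI);
+    /// OpdMapper.createVRegs(0); // Registers for the 0th operand.
+    /// for (unsigned NewVReg : OpdMapper.getVRegs(0)) {
+    ///   // Use the new virtual registers.
+    /// }
+    /// \endcode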
+    void createVRegs(unsigned OpIdx);
+
+    /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
+    /// the OpIdx-th operand to \p NewVReg.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre getInstrMapping().getOperandMapping(OpIdx).BreakDown.size() >
+    /// PartialMapIdx
+    /// \pre NewReg != 0
+    ///
+    /// \post the \p PartialMapIdx-th register of the value mapping of the \p
+    /// OpIdx-th operand has been set.
+    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, unsigned NewVReg);
+
+    /// Get all the virtual registers required to map the \p OpIdx-th operand of
+    /// the instruction.
+    ///
+    /// This returns an empty range when createVRegs or setVRegs has not been
+    /// called.
+    /// The iterator may be invalidated by a call to setVRegs or createVRegs.
+    ///
+    /// When \p ForDebug is true, we will not check that the list of new virtual
+    /// registers does not contain uninitialized values.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre ForDebug || All partial mappings have been assigned a register
+    iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+    getVRegs(unsigned OpIdx, bool ForDebug = false) const;
+
+    /// Print this operands mapper on dbgs() stream.
+    void dump() const;
+
+    /// Print this operands mapper on \p OS stream.
+    void print(raw_ostream &OS, bool ForDebug = false) const;
+  };
+
+protected:
+  /// Hold the set of supported register banks.
+  RegisterBank **RegBanks;
+
+  /// Total number of register banks.
+  unsigned NumRegBanks;
+
+  /// Keep dynamically allocated PartialMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+      MapOfPartialMappings;
+
+  /// Keep dynamically allocated ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+      MapOfValueMappings;
+
+  /// Keep dynamically allocated array of ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+      MapOfOperandsMappings;
+
+  /// Keep dynamically allocated InstructionMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+      MapOfInstructionMappings;
+
+  /// Getting the minimal register class of a physreg is expensive.
+  /// Cache this information as we get it.
+  mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;
+
+  /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
+  /// RegisterBank instances.
+  RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
+
+  /// This constructor is meaningless.
+  /// It just provides a default constructor that can be used at link time
+  /// when GlobalISel is not built.
+  /// That way, targets can still inherit from this class without doing
+  /// crazy gymnastics to avoid link-time failures.
+  /// \note That works because the constructor is inlined.
+  RegisterBankInfo() {
+    llvm_unreachable("This constructor should not be executed");
+  }
+
+  /// Get the register bank identified by \p ID.
+  RegisterBank &getRegBank(unsigned ID) {
+    assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
+    return *RegBanks[ID];
+  }
+
+  /// Get the MinimalPhysRegClass for Reg.
+  /// \pre Reg is a physical register.
+  const TargetRegisterClass &
+  getMinimalPhysRegClass(unsigned Reg, const TargetRegisterInfo &TRI) const;
+
+  /// Try to get the mapping of \p MI.
+  /// See getInstrMapping for more details on what a mapping represents.
+  ///
+  /// Unlike getInstrMapping the returned InstructionMapping may be invalid
+  /// (isValid() == false).
+  /// This means that the target independent code is not smart enough
+  /// to get the mapping of \p MI and thus, the target has to provide the
+  /// information for \p MI.
+  ///
+  /// This implementation is able to get the mapping of:
+  /// - Target specific instructions by looking at the encoding constraints.
+  /// - Any instruction if all the register operands have already been assigned
+  ///   a register, a register class, or a register bank.
+  /// - Copies and phis if at least one of the operands has been assigned a
+  ///   register, a register class, or a register bank.
+  /// In other words, this method will likely fail to find a mapping for
+  /// any generic opcode that has not been lowered by target specific code.
+  const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
+
+  /// Get the uniquely generated PartialMapping for the
+  /// given arguments.
+  const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
+                                          const RegisterBank &RegBank) const;
+
+  /// \name Methods to get a uniquely generated ValueMapping.
+  /// @{
+
+  /// The most common ValueMapping consists of a single PartialMapping.
+  /// Provide a dedicated method for that common case.
+  const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
+                                      const RegisterBank &RegBank) const;
+
+  /// Get the ValueMapping for the given arguments.
+  const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
+                                      unsigned NumBreakDowns) const;
+  /// @}
+
+  /// \name Methods to get a uniquely generated array of ValueMapping.
+  /// @{
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements between \p Begin and \p End.
+  ///
+  /// Elements that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  ///
+  /// \pre The pointers on ValueMapping between \p Begin and \p End
+  /// must uniquely identify a ValueMapping. Otherwise, there is no
+  /// guarantee that the returned instance will be unique, i.e., another
+  /// OperandsMapping could have the same content.
+  template <typename Iterator>
+  const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements of \p OpdsMapping.
+  ///
+  /// Elements of \p OpdsMapping that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  const ValueMapping *getOperandsMapping(
+      const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// given arguments.
+  ///
+  /// Arguments that are nullptr will be replaced by invalid
+  /// ValueMapping (ValueMapping::isValid == false).
+  const ValueMapping *getOperandsMapping(
+      std::initializer_list<const ValueMapping *> OpdsMapping) const;
+  /// @}
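+
+  // Illustrative sketch (not part of the original header): a target's
+  // getInstrMapping implementation could combine these helpers as follows,
+  // assuming a hypothetical GPRRegBank that covers 32-bit scalars:
+  //
+  //   const ValueMapping &GPR32 = getValueMapping(0, 32, GPRRegBank);
+  //   const ValueMapping *OpdsMapping =
+  //       getOperandsMapping({&GPR32, &GPR32, &GPR32});
+  //
+  // All three register operands of, e.g., a 32-bit add would then share the
+  // same uniqued 32-bit mapping.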
+
+  /// \name Methods to get a uniquely generated InstructionMapping.
+  /// @{
+
+private:
+  /// Method to get a uniquely generated InstructionMapping.
+  const InstructionMapping &
+  getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+                            unsigned Cost = 0,
+                            const ValueMapping *OperandsMapping = nullptr,
+                            unsigned NumOperands = 0) const;
+
+public:
+  /// Method to get a uniquely generated InstructionMapping.
+  const InstructionMapping &
+  getInstructionMapping(unsigned ID, unsigned Cost,
+                        const ValueMapping *OperandsMapping,
+                        unsigned NumOperands) const {
+    return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+                                     OperandsMapping, NumOperands);
+  }
+
+  /// Method to get a uniquely generated invalid InstructionMapping.
+  const InstructionMapping &getInvalidInstructionMapping() const {
+    return getInstructionMappingImpl(/*IsInvalid*/ true);
+  }
+  /// @}
+
+  /// Get the register bank for the \p OpIdx-th operand of \p MI from
+  /// the encoding constraints, if any.
+  ///
+  /// \return A register bank that covers the register class of the
+  /// related encoding constraints or nullptr if \p MI did not provide
+  /// enough information to deduce it.
+  const RegisterBank *
+  getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
+                            const TargetInstrInfo &TII,
+                            const TargetRegisterInfo &TRI) const;
+
+  /// Helper method to apply something that is like the default mapping.
+  /// Basically, that means that \p OpdMapper.getMI() is left untouched
+  /// aside from the reassignment of the register operands that have been
+  /// remapped.
+  ///
+  /// The type of all the new registers that have been created by the
+  /// mapper are properly remapped to the type of the original registers
+  /// they replace. In other words, the semantic of the instruction does
+  /// not change, only the register banks.
+  ///
+  /// If the mapping of one of the operand spans several registers, this
+  /// method will abort as this is not like a default mapping anymore.
+  ///
+  /// \pre For OpIdx in {0..\p OpdMapper.getMI().getNumOperands())
+  ///        the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
+  static void applyDefaultMapping(const OperandsMapper &OpdMapper);
+
+  /// See ::applyMapping.
+  virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
+    llvm_unreachable("The target has to implement that part");
+  }
+
+public:
+  virtual ~RegisterBankInfo() = default;
+
+  /// Get the register bank identified by \p ID.
+  const RegisterBank &getRegBank(unsigned ID) const {
+    return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
+  }
+
+  /// Get the register bank of \p Reg.
+  /// If Reg has not been assigned a register, a register class,
+  /// or a register bank, then this returns nullptr.
+  ///
+  /// \pre Reg != 0 (NoRegister)
+  const RegisterBank *getRegBank(unsigned Reg, const MachineRegisterInfo &MRI,
+                                 const TargetRegisterInfo &TRI) const;
+
+  /// Get the total number of register banks.
+  unsigned getNumRegBanks() const { return NumRegBanks; }
+
+  /// Get a register bank that covers \p RC.
+  ///
+  /// \pre \p RC is a user-defined register class (as opposed to one
+  /// generated by TableGen).
+  ///
+  /// \note The mapping RC -> RegBank could be built while adding the
+  /// coverage for the register banks. However, we do not do it, because,
+  /// at least for now, we only need this information for register classes
+  /// that are used in the description of instructions. In other words,
+  /// there are just a handful of them and we do not want to waste space.
+  ///
+  /// \todo This should be TableGen'ed.
+  virtual const RegisterBank &
+  getRegBankFromRegClass(const TargetRegisterClass &RC) const {
+    llvm_unreachable("The target must override this method");
+  }
+
+  /// Get the cost of a copy from \p B to \p A, or put differently,
+  /// get the cost of A = COPY B. Since register banks may cover
+  /// different sizes, \p Size specifies the size in bits that will be
+  /// copied around.
+  ///
+  /// \note Since this is a copy, both registers have the same size.
+  virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+                            unsigned Size) const {
+    // Optimistically assume that copies are coalesced. I.e., when
+    // they are on the same bank, they are free.
+    // Otherwise assume a non-zero cost of 1. The targets are supposed
+    // to override that properly anyway if they care.
+    return &A != &B;
+  }
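+
+  // Illustrative sketch (hypothetical target, not part of the original
+  // header): a target where cross-bank copies go through an extra move
+  // instruction could override copyCost along these lines:
+  //
+  //   unsigned MyTargetRegisterBankInfo::copyCost(const RegisterBank &A,
+  //                                               const RegisterBank &B,
+  //                                               unsigned Size) const {
+  //     if (&A != &B)
+  //       return 2; // E.g., GPR <-> FPR moves cost an extra cycle.
+  //     return RegisterBankInfo::copyCost(A, B, Size);
+  //   }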
+
+  /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
+  ///
+  /// \pre \p Reg is a virtual register that either has a bank or a class.
+  /// \returns The constrained register class, or nullptr if there is none.
+  /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
+  /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
+  /// purpose, including non-select passes of GlobalISel.
+  static const TargetRegisterClass *
+  constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC,
+                           MachineRegisterInfo &MRI);
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by target independent code.
+  /// Make sure not to use that identifier to avoid possible collision.
+  static const unsigned DefaultMappingID;
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by the default constructor.
+  /// Make sure not to use that identifier.
+  static const unsigned InvalidMappingID;
+
+  /// Get the mapping of the different operands of \p MI
+  /// on the register bank.
+  /// This mapping should be the direct translation of \p MI.
+  /// In other words, when \p MI is mapped with the returned mapping,
+  /// only the register banks of the operands of \p MI need to be updated.
+  /// In particular, neither the opcode nor the type of \p MI needs to be
+  /// updated for this direct mapping.
+  ///
+  /// The target independent implementation gives a mapping based on
+  /// the register classes for the target specific opcode.
+  /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
+  /// Make sure you do not use that ID for the alternative mapping
+  /// for MI. See getInstrAlternativeMappings for the alternative
+  /// mappings.
+  ///
+  /// For instance, if \p MI is a vector add, the mapping should
+  /// not be a scalarization of the add.
+  ///
+  /// \post returnedVal.verify(MI).
+  ///
+  /// \note If returnedVal does not verify MI, this would probably mean
+  /// that the target does not support that instruction.
+  virtual const InstructionMapping &
+  getInstrMapping(const MachineInstr &MI) const;
+
+  /// Get the alternative mappings for \p MI.
+  /// Alternative in the sense different from getInstrMapping.
+  virtual InstructionMappings
+  getInstrAlternativeMappings(const MachineInstr &MI) const;
+
+  /// Get the possible mapping for \p MI.
+  /// A mapping defines where the different operands may live and at what cost.
+  /// For instance, let us consider:
+  /// v0(16) = G_ADD <2 x i8> v1, v2
+  /// The possible mapping could be:
+  ///
+  /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
+  ///                              /*v2*/{(0xFFFF, VPR)}}
+  /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
+  ///
+  /// \note The first alternative of the returned mapping should be the
+  /// direct translation of the current form of \p MI.
+  ///
+  /// \post !returnedVal.empty().
+  InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;
+
+  /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
+  /// After this call \p OpdMapper.getMI() may not be valid anymore.
+  /// \p OpdMapper.getInstrMapping().getID() carries the information of
+  /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
+  /// by the various getInstrXXXMapping methods.
+  ///
+  /// Therefore, getting the mapping and applying it should be kept in
+  /// sync.
+  void applyMapping(const OperandsMapper &OpdMapper) const {
+    // The only mapping we know how to handle is the default mapping.
+    if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
+      return applyDefaultMapping(OpdMapper);
+    // For other mapping, the target needs to do the right thing.
+    // If that means calling applyDefaultMapping, fine, but this
+    // must be explicitly stated.
+    applyMappingImpl(OpdMapper);
+  }
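+
+  // Illustrative sketch (simplified, not part of the original header) of how
+  // a RegBankSelect-style driver is expected to pair the two steps:
+  //
+  //   const InstructionMapping &Mapping = RBI.getInstrMapping(MI);
+  //   RegisterBankInfo::OperandsMapper OpdMapper(MI, Mapping, MRI);
+  //   RBI.applyMapping(OpdMapper);
+  //
+  // The mapping that is applied must be the one that was queried, since its
+  // ID tells applyMappingImpl what was chosen.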
+
+  /// Get the size in bits of \p Reg.
+  /// Utility method to get the size of any register. Unlike
+  /// MachineRegisterInfo::getSize, the register does not need to be a
+  /// virtual register.
+  ///
+  /// \pre \p Reg != 0 (NoRegister).
+  unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
+                         const TargetRegisterInfo &TRI) const;
+
+  /// Check that the information held by this instance makes sense for the
+  /// given \p TRI.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+};
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::PartialMapping &PartMapping) {
+  PartMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
+  ValMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::InstructionMapping &InstrMapping) {
+  InstrMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
+  OpdMapper.print(OS, /*ForDebug*/ false);
+  return OS;
+}
+
+/// Hashing function for PartialMapping.
+/// It is required for the hashing of ValueMapping.
+hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
new file mode 100644
index 0000000..7b22e34
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
@@ -0,0 +1,34 @@
+//===- llvm/CodeGen/GlobalISel/Types.h - Types used by GISel ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes high level types that are used by several passes or
+/// APIs involved in the GlobalISel pipeline.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_TYPES_H
+#define LLVM_CODEGEN_GLOBALISEL_TYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+class Value;
+
+/// Map a value to a virtual register.
+/// For now, we chose to map aggregate types to a single virtual
+/// register. This might be revisited if it turns out to be inefficient.
+/// PR26161 tracks that.
+/// Note: We need to expose this type to the target hooks for things like
+/// ABI lowering that would be used during IRTranslation.
+using ValueToVReg = DenseMap<const Value *, unsigned>;
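+
+// Illustrative usage (not part of the original header): during IRTranslation,
+// assuming MRI is the function's MachineRegisterInfo and V is an IR value of
+// 32-bit scalar type, one could write:
+//
+//   ValueToVReg VMap;
+//   VMap[V] = MRI.createGenericVirtualRegister(LLT::scalar(32));
+//   unsigned VReg = VMap.lookup(V);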
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_TYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
new file mode 100644
index 0000000..837035f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -0,0 +1,106 @@
+//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of helper functions used throughout the
+/// GlobalISel pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
+#define LLVM_CODEGEN_GLOBALISEL_UTILS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineOptimizationRemarkEmitter;
+class MachineOptimizationRemarkMissed;
+class MachineRegisterInfo;
+class MCInstrDesc;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetPassConfig;
+class TargetRegisterInfo;
+class TargetRegisterClass;
+class Twine;
+class ConstantFP;
+class APFloat;
+
+/// Try to constrain Reg to the specified register class. If this fails,
+/// create a new virtual register in the correct class and insert a COPY before
+/// \p InsertPt. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainRegToClass(MachineRegisterInfo &MRI,
+                             const TargetInstrInfo &TII,
+                             const RegisterBankInfo &RBI,
+                             MachineInstr &InsertPt, unsigned Reg,
+                             const TargetRegisterClass &RegClass);
+
+/// Try to constrain Reg so that it is usable by argument OpIdx of the
+/// provided MCInstrDesc \p II. If this fails, create a new virtual
+/// register in the correct class and insert a COPY before \p InsertPt.
+/// This is equivalent to constrainRegToClass() with RegClass obtained from the
+/// MCInstrDesc. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainOperandRegClass(const MachineFunction &MF,
+                                  const TargetRegisterInfo &TRI,
+                                  MachineRegisterInfo &MRI,
+                                  const TargetInstrInfo &TII,
+                                  const RegisterBankInfo &RBI,
+                                  MachineInstr &InsertPt, const MCInstrDesc &II,
+                                  const MachineOperand &RegMO, unsigned OpIdx);
+
+/// Mutate the newly-selected instruction \p I to constrain its (possibly
+/// generic) virtual register operands to the instruction's register class.
+/// This could involve inserting COPYs before (for uses) or after (for defs).
+/// This requires the number of operands to match the instruction description.
+/// \returns whether operand regclass constraining succeeded.
+///
+// FIXME: Not all instructions have the same number of operands. We should
+// probably expose a constrain helper per operand and let the target selector
+// constrain individual registers, like fast-isel.
+bool constrainSelectedInstRegOperands(MachineInstr &I,
+                                      const TargetInstrInfo &TII,
+                                      const TargetRegisterInfo &TRI,
+                                      const RegisterBankInfo &RBI);
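+
+// Illustrative sketch (hypothetical opcode, not part of the original header):
+// a typical InstructionSelector::select implementation mutates the generic
+// instruction in place and then constrains all of its register operands:
+//
+//   I.setDesc(TII.get(MYTGT::ADDWrr)); // MYTGT::ADDWrr is hypothetical.
+//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+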
+/// Check whether an instruction \p MI is dead: it only defines dead virtual
+/// registers, and doesn't have other side effects.
+bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
+
+/// Report an ISel error as a missed optimization remark to the LLVMContext's
+/// diagnostic stream.  Set the FailedISel MachineFunction property.
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        MachineOptimizationRemarkMissed &R);
+
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        const char *PassName, StringRef Msg,
+                        const MachineInstr &MI);
+
+Optional<int64_t> getConstantVRegVal(unsigned VReg,
+                                     const MachineRegisterInfo &MRI);
+const ConstantFP* getConstantFPVRegVal(unsigned VReg,
+                                       const MachineRegisterInfo &MRI);
+
+/// See if Reg is defined by a single def instruction that is
+/// Opcode. Also try to do trivial folding if it's a COPY with
+/// the same types. Returns null otherwise.
+MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+                           const MachineRegisterInfo &MRI);
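+
+// Illustrative sketches (not part of the original header) of how a combine
+// could use the helpers above:
+//
+//   // Non-null iff Reg is (possibly a copy of) the result of a G_TRUNC.
+//   MachineInstr *Def = getOpcodeDef(TargetOpcode::G_TRUNC, Reg, MRI);
+//
+//   // Given a G_ADD MI, true iff it is (G_ADD x, 0), i.e. replaceable by a
+//   // copy of x.
+//   Optional<int64_t> Cst =
+//       getConstantVRegVal(MI.getOperand(2).getReg(), MRI);
+//   bool IsAddZero = Cst && *Cst == 0;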
+
+/// Returns an APFloat from Val converted to the appropriate size.
+APFloat getAPFloatFromSize(double Val, unsigned Size);
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_UTILS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
new file mode 100644
index 0000000..ea94871
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ISDOpcodes.h
@@ -0,0 +1,997 @@
+//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares codegen opcodes and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ISDOPCODES_H
+#define LLVM_CODEGEN_ISDOPCODES_H
+
+namespace llvm {
+
+/// ISD namespace - This namespace contains an enum which represents all of the
+/// SelectionDAG node types and value types.
+///
+namespace ISD {
+
+  //===--------------------------------------------------------------------===//
+  /// ISD::NodeType enum - This enum defines the target-independent operators
+  /// for a SelectionDAG.
+  ///
+  /// Targets may also define target-dependent operator codes for SDNodes. For
+  /// example, on x86, these are the enum values in the X86ISD namespace.
+  /// Targets should aim to use target-independent operators to model their
+  /// instruction sets as much as possible, and only use target-dependent
+  /// operators when they have special requirements.
+  ///
+  /// Finally, during and after selection proper, SDNodes may use special
+  /// operator codes that correspond directly with MachineInstr opcodes. These
+  /// are used to represent selected instructions. See the isMachineOpcode()
+  /// and getMachineOpcode() member functions of SDNode.
+  ///
+  enum NodeType {
+    /// DELETED_NODE - This is an illegal value that is used to catch
+    /// errors.  This opcode is not a legal opcode for any node.
+    DELETED_NODE,
+
+    /// EntryToken - This is the marker used to indicate the start of a region.
+    EntryToken,
+
+    /// TokenFactor - This node takes multiple tokens as input and produces a
+    /// single token result. This is used to represent the fact that the operand
+    /// operators are independent of each other.
+    TokenFactor,
+
+    /// AssertSext, AssertZext - These nodes record if a register contains a
+    /// value that has already been zero or sign extended from a narrower type.
+    /// These nodes take two operands.  The first is the node that has already
+    /// been extended, and the second is a value type node indicating the width
+    /// of the extension.
+    AssertSext, AssertZext,
+
+    /// Various leaf nodes.
+    BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
+    Constant, ConstantFP,
+    GlobalAddress, GlobalTLSAddress, FrameIndex,
+    JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
+
+    /// The address of the GOT
+    GLOBAL_OFFSET_TABLE,
+
+    /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+    /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
+    /// of the frame or return address to return.  An index of zero corresponds
+    /// to the current function's frame or return address, an index of one to
+    /// the parent's frame or return address, and so on.
+    FRAMEADDR, RETURNADDR, ADDROFRETURNADDR,
+
+    /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
+    /// Materializes the offset from the local object pointer of another
+    /// function to a particular local object passed to llvm.localescape. The
+    /// operand is the MCSymbol label used to represent this offset, since
+    /// typically the offset is not known until after code generation of the
+    /// parent.
+    LOCAL_RECOVER,
+
+    /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on
+    /// the DAG, which implements the named register global variables extension.
+    READ_REGISTER,
+    WRITE_REGISTER,
+
+    /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+    /// first (possible) on-stack argument. This is needed for correct stack
+    /// adjustment during unwind.
+    FRAME_TO_ARGS_OFFSET,
+
+    /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
+    /// Frame Address (CFA), generally the value of the stack pointer at the
+    /// call site in the previous frame.
+    EH_DWARF_CFA,
+
+    /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+    /// 'eh_return' gcc dwarf builtin, which is used to return from
+    /// exception. The general meaning is: adjust stack by OFFSET and pass
+    /// execution to HANDLER. Many platform-related details also :)
+    EH_RETURN,
+
+    /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
+    /// This corresponds to the eh.sjlj.setjmp intrinsic.
+    /// It takes an input chain and a pointer to the jump buffer as inputs
+    /// and returns an outchain.
+    EH_SJLJ_SETJMP,
+
+    /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
+    /// This corresponds to the eh.sjlj.longjmp intrinsic.
+    /// It takes an input chain and a pointer to the jump buffer as inputs
+    /// and returns an outchain.
+    EH_SJLJ_LONGJMP,
+
+    /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
+    /// The target initializes the dispatch table here.
+    EH_SJLJ_SETUP_DISPATCH,
+
+    /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
+    /// simplification, or lowering of the constant. They are used for constants
+    /// which are known to fit in the immediate fields of their users, or for
+    /// carrying magic numbers which are not values which need to be
+    /// materialized in registers.
+    TargetConstant,
+    TargetConstantFP,
+
+    /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+    /// anything else with this node, and this is valid in the target-specific
+    /// dag, turning into a GlobalAddress operand.
+    TargetGlobalAddress,
+    TargetGlobalTLSAddress,
+    TargetFrameIndex,
+    TargetJumpTable,
+    TargetConstantPool,
+    TargetExternalSymbol,
+    TargetBlockAddress,
+
+    MCSymbol,
+
+    /// TargetIndex - Like a constant pool entry, but with completely
+    /// target-dependent semantics. Holds target flags, a 32-bit index, and a
+    /// 64-bit index. Targets can use this however they like.
+    TargetIndex,
+
+    /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
+    /// This node represents a target intrinsic function with no side effects.
+    /// The first operand is the ID number of the intrinsic from the
+    /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
+    /// node returns the result of the intrinsic.
+    INTRINSIC_WO_CHAIN,
+
+    /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
+    /// This node represents a target intrinsic function with side effects that
+    /// returns a result.  The first operand is a chain pointer.  The second is
+    /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
+    /// operands to the intrinsic follow.  The node has two results, the result
+    /// of the intrinsic and an output chain.
+    INTRINSIC_W_CHAIN,
+
+    /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
+    /// This node represents a target intrinsic function with side effects that
+    /// does not return a result.  The first operand is a chain pointer.  The
+    /// second is the ID number of the intrinsic from the llvm::Intrinsic
+    /// namespace.  The operands to the intrinsic follow.
+    INTRINSIC_VOID,
+
+    /// CopyToReg - This node has three operands: a chain, a register number to
+    /// set to this value, and a value.
+    CopyToReg,
+
+    /// CopyFromReg - This node indicates that the input value is a virtual or
+    /// physical register that is defined outside of the scope of this
+    /// SelectionDAG.  The register is available from the RegisterSDNode object.
+    CopyFromReg,
+
+    /// UNDEF - An undefined node.
+    UNDEF,
+
+    /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+    /// a Constant, which is required to be operand #1) half of the integer or
+    /// float value specified as operand #0.  This is only for use before
+    /// legalization, for values that will be broken into multiple registers.
+    EXTRACT_ELEMENT,
+
+    /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
+    /// Given two values of the same integer value type, this produces a value
+    /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
+    /// legalization. The lower part of the composite value should be in
+    /// element 0 and the upper part should be in element 1.
+    BUILD_PAIR,
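+
+    // Illustrative sketch (not part of the original header): splitting an
+    // i64 value X on a 32-bit target and rebuilding it with the
+    // SelectionDAG API:
+    //
+    //   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, X,
+    //                            DAG.getIntPtrConstant(0, DL));
+    //   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, X,
+    //                            DAG.getIntPtrConstant(1, DL));
+    //   SDValue Re = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);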
+
+    /// MERGE_VALUES - This node takes multiple discrete operands and returns
+    /// them all as its individual results.  This node has exactly the same
+    /// number of inputs and outputs. This node is useful for some pieces of the
+    /// code generator that want to think about a single node with multiple
+    /// results, not multiple nodes.
+    MERGE_VALUES,
+
+    /// Simple integer binary arithmetic operators.
+    ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
+
+    /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+    /// a signed/unsigned value of type i[2*N], and return the full value as
+    /// two results, each of type iN.
+    SMUL_LOHI, UMUL_LOHI,
+
+    /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+    /// remainder result.
+    SDIVREM, UDIVREM,
+
+    /// CARRY_FALSE - This node is used when folding other nodes,
+    /// like ADDC/SUBC, which indicate the carry result is always false.
+    CARRY_FALSE,
+
+    /// Carry-setting nodes for multiple precision addition and subtraction.
+    /// These nodes take two operands of the same value type, and produce two
+    /// results.  The first result is the normal add or sub result, the second
+    /// result is the carry flag result.
+    /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
+    /// They are kept around for now to provide a smooth transition path
+    /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
+    ADDC, SUBC,
+
+    /// Carry-using nodes for multiple precision addition and subtraction. These
+    /// nodes take three operands: The first two are the normal lhs and rhs to
+    /// the add or sub, and the third is the input carry flag.  These nodes
+    /// produce two results; the normal result of the add or sub, and the output
+    /// carry flag.  These nodes both read and write a carry flag to allow them
+    /// to be chained together for add and sub of arbitrarily large
+    /// values.
+    ADDE, SUBE,
+
+    /// Carry-using nodes for multiple precision addition and subtraction.
+    /// These nodes take three operands: The first two are the normal lhs and
+    /// rhs to the add or sub, and the third is a boolean indicating if there
+    /// is an incoming carry. These nodes produce two results: the normal
+    /// result of the add or sub, and the output carry so they can be chained
+    /// together. The use of this opcode is preferable to adde/sube if the
+    /// target supports it, as the carry is a regular value rather than a
+    /// glue, which allows further optimisation.
+    ADDCARRY, SUBCARRY,
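+
+    // Illustrative sketch (not part of the original header): a 64-bit add on
+    // a 32-bit target expands to a UADDO of the low halves followed by an
+    // ADDCARRY of the high halves that consumes the carry:
+    //
+    //   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
+    //   SDValue Lo = DAG.getNode(ISD::UADDO, DL, VTs, LHSLo, RHSLo);
+    //   SDValue Hi = DAG.getNode(ISD::ADDCARRY, DL, VTs, LHSHi, RHSHi,
+    //                            Lo.getValue(1));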
+
+    /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+    /// These nodes take two operands: the normal LHS and RHS to the add. They
+    /// produce two results: the normal result of the add, and a boolean that
+    /// indicates if an overflow occurred (*not* a flag, because it may be stored
+    /// to memory, etc.).  If the type of the boolean is not i1 then the high
+    /// bits conform to getBooleanContents.
+    /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
+    SADDO, UADDO,
+
+    /// Same for subtraction.
+    SSUBO, USUBO,
+
+    /// Same for multiplication.
+    SMULO, UMULO,
+
+    /// Simple binary floating point operators.
+    FADD, FSUB, FMUL, FDIV, FREM,
+
+    /// Constrained versions of the binary floating point operators.
+    /// These will be lowered to the simple operators before final selection.
+    /// They are used to limit optimizations while the DAG is being
+    /// optimized.
+    STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
+    STRICT_FMA,
+
+    /// Constrained versions of libm-equivalent floating point intrinsics.
+    /// These will be lowered to the equivalent non-constrained pseudo-op
+    /// (or expanded to the equivalent library call) before final selection.
+    /// They are used to limit optimizations while the DAG is being optimized.
+    STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
+    STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
+    STRICT_FRINT, STRICT_FNEARBYINT,
+
+    /// FMA - Perform a * b + c with no intermediate rounding step.
+    FMA,
+
+    /// FMAD - Perform a * b + c, while getting the same result as the
+    /// separately rounded operations.
+    FMAD,
+
+    /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
+    /// DAG node does not require that X and Y have the same type, just that
+    /// they are both floating point.  X and the result must have the same type.
+    /// FCOPYSIGN(f32, f64) is allowed.
+    FCOPYSIGN,
+
+    /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+    /// value as an integer 0/1 value.
+    FGETSIGN,
+
+    /// Returns platform specific canonical encoding of a floating point number.
+    FCANONICALIZE,
+
+    /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
+    /// specified, possibly variable, elements.  The number of elements is
+    /// required to be a power of two.  The types of the operands must all be
+    /// the same and must match the vector element type, except that integer
+    /// types are allowed to be larger than the element type, in which case
+    /// the operands are implicitly truncated.
+    BUILD_VECTOR,
+
+    /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
+    /// at IDX replaced with VAL.  If the type of VAL is larger than the vector
+    /// element type then VAL is truncated before replacement.
+    INSERT_VECTOR_ELT,
+
+    /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
+    /// identified by the (potentially variable) element number IDX.  If the
+    /// return type is an integer type larger than the element type of the
+    /// vector, the result is extended to the width of the return type. In
+    /// that case, the high bits are undefined.
+    EXTRACT_VECTOR_ELT,
+
+    /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
+    /// vector type with the same length and element type, this produces a
+    /// concatenated vector result value, with length equal to the sum of the
+    /// lengths of the input vectors.
+    CONCAT_VECTORS,
+
+    /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
+    /// with VECTOR2 inserted into VECTOR1 at the (potentially
+    /// variable) element number IDX, which must be a multiple of the
+    /// VECTOR2 vector length.  The elements of VECTOR1 starting at
+    /// IDX are overwritten with VECTOR2.  Elements IDX through
+    /// vector_length(VECTOR2) must be valid VECTOR1 indices.
+    INSERT_SUBVECTOR,
+
+    /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
+    /// vector value) starting with the element number IDX, which must be a
+    /// constant multiple of the result vector length.
+    EXTRACT_SUBVECTOR,
+
+    /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+    /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
+    /// values that indicate which value (or undef) each result element will
+    /// get.  These constant ints are accessible through the
+    /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
+    /// 'vperm' instruction, except that the indices must be constants and are
+    /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
+    VECTOR_SHUFFLE,
+
+    /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
+    /// scalar value into element 0 of the resultant vector type.  The top
+    /// elements 1 to N-1 of the N-element vector are undefined.  The type
+    /// of the operand must match the vector element type, except when they
+    /// are integer types.  In this case the operand is allowed to be wider
+    /// than the vector element type, and is implicitly truncated to it.
+    SCALAR_TO_VECTOR,
+
+    /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
+    /// producing an unsigned/signed value of type i[2*N], then return the top
+    /// part.
+    MULHU, MULHS,
+
+    /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
+    /// integers.
+    SMIN, SMAX, UMIN, UMAX,
+
+    /// Bitwise operators - logical and, logical or, logical xor.
+    AND, OR, XOR,
+
+    /// ABS - Determine the unsigned absolute value of a signed integer value of
+    /// the same bitwidth.
+    /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
+    /// is performed.
+    ABS,
+
+    /// Shift and rotation operations.  After legalization, the type of the
+    /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
+    /// the shift amount can be any type, but care must be taken to ensure it is
+    /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
+    /// legalization, types like i1024 can occur and i8 doesn't have enough bits
+    /// to represent the shift amount.
+    /// When the 1st operand is a vector, the shift amount must be in the same
+    /// type. (TLI.getShiftAmountTy() will return the same type when the input
+    /// type is a vector.)
+    SHL, SRA, SRL, ROTL, ROTR,
+
+    /// Byte Swap and Counting operators.
+    BSWAP, CTTZ, CTLZ, CTPOP, BITREVERSE,
+
+    /// Bit counting operators with an undefined result for zero inputs.
+    CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
+
+    /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
+    /// i1 then the high bits must conform to getBooleanContents.
+    SELECT,
+
+    /// Select with a vector condition (op #0) and two vector operands (ops #1
+    /// and #2), returning a vector result.  All vectors have the same length.
+    /// Much like the scalar select and setcc, each bit in the condition selects
+    /// whether the corresponding result element is taken from op #1 or op #2.
+    /// At first, the VSELECT condition is of vXi1 type. Later, targets may
+    /// change the condition type in order to match the VSELECT node using a
+    /// pattern. The condition follows the BooleanContent format of the target.
+    VSELECT,
+
+    /// Select with condition operator - This selects between a true value and
+    /// a false value (ops #2 and #3) based on the boolean result of comparing
+    /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+    /// condition code in op #4, a CondCodeSDNode.
+    SELECT_CC,
+
+    /// SetCC operator - This evaluates to a true value iff the condition is
+    /// true.  If the result value type is not i1 then the high bits conform
+    /// to getBooleanContents.  The operands to this are the left and right
+    /// operands to compare (ops #0, and #1) and the condition code to compare
+    /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
+    /// then the result type must also be a vector type.
+    SETCC,
+
+    /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, and
+    /// op #2 is a *carry value*. This operator checks the result of
+    /// "LHS - RHS - Carry", and can be used to compare two wide integers:
+    /// (setcce lhshi rhshi (subc lhslo rhslo) cc). Only valid for integers.
+    /// FIXME: This node is deprecated in favor of SETCCCARRY.
+    /// It is kept around for now to provide a smooth transition path
+    /// toward the use of SETCCCARRY and will eventually be removed.
+    SETCCE,
+
+    /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
+    /// op #2 is a boolean indicating if there is an incoming carry. This
+    /// operator checks the result of "LHS - RHS - Carry", and can be used to
+    /// compare two wide integers: (setcce lhshi rhshi (subc lhslo rhslo) cc).
+    /// Only valid for integers.
+    SETCCCARRY,
+
+    /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+    /// integer shift operations.  The operation ordering is:
+    ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
+    SHL_PARTS, SRA_PARTS, SRL_PARTS,
+
+    /// Conversion operators.  These are all single input single output
+    /// operations.  For all of these, the result type must be strictly
+    /// wider or narrower (depending on the operation) than the source
+    /// type.
+
+    /// SIGN_EXTEND - Used for integer types, replicating the sign bit
+    /// into new bits.
+    SIGN_EXTEND,
+
+    /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
+    ZERO_EXTEND,
+
+    /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
+    ANY_EXTEND,
+
+    /// TRUNCATE - Completely drop the high bits.
+    TRUNCATE,
+
+    /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+    /// depends on the first letter) to floating point.
+    SINT_TO_FP,
+    UINT_TO_FP,
+
+    /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+    /// sign extend a small value in a large integer register (e.g. sign
+    /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
+    /// with the 7th bit).  The size of the smaller type is indicated by
+    /// operand #1, a ValueType node.
+    SIGN_EXTEND_INREG,
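+
+    // Illustrative sketch (not part of the original header): sign extending
+    // the low 8 bits of a 32-bit value X is equivalent to the shift pair
+    //
+    //   (sign_extend_inreg X, i8)  ==  (sra (shl X, 24), 24)
+    //
+    // and would be built as
+    //
+    //   DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, X,
+    //               DAG.getValueType(MVT::i8));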
+
+    /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register any-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is any-extended into the corresponding, wider result
+    /// elements with the high bits becoming undef.
+    ANY_EXTEND_VECTOR_INREG,
+
+    /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register sign-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is sign-extended into the corresponding, wider result
+    /// elements.
+    // FIXME: The SIGN_EXTEND_INREG node isn't specifically limited to
+    // scalars, but it also doesn't handle vectors well. Either it should be
+    // restricted to scalars or this node (and its handling) should be merged
+    // into it.
+    SIGN_EXTEND_VECTOR_INREG,
+
+    /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+    /// in-register zero-extension of the low lanes of an integer vector. The
+    /// result type must have fewer elements than the operand type, and those
+    /// elements must be larger integer types such that the total size of the
+    /// operand type and the result type match. Each of the low operand
+    /// elements is zero-extended into the corresponding, wider result
+    /// elements.
+    ZERO_EXTEND_VECTOR_INREG,
+
+    /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
+    /// integer.
+    FP_TO_SINT,
+    FP_TO_UINT,
+
+    /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
+    /// down to the precision of the destination VT.  TRUNC is a flag, which is
+    /// always an integer that is zero or one.  If TRUNC is 0, this is a
+    /// normal rounding, if it is 1, this FP_ROUND is known to not change the
+    /// value of Y.
+    ///
+    /// The TRUNC = 1 case is used in cases where we know that the value will
+    /// not be modified by the node, because Y is not using any of the extra
+    /// precision of source type.  This allows certain transformations like
+    /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
+    /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
+    FP_ROUND,
+
+    /// FLT_ROUNDS_ - Returns current rounding mode:
+    /// -1 Undefined
+    ///  0 Round to 0
+    ///  1 Round to nearest
+    ///  2 Round to +inf
+    ///  3 Round to -inf
+    FLT_ROUNDS_,
+
+    /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
+    /// rounds it to a floating point value.  It then promotes it and returns it
+    /// in a register of the same size.  This operation effectively just
+    /// discards excess precision.  The type to round down to is specified by
+    /// the VT operand, a VTSDNode.
+    FP_ROUND_INREG,
+
+    /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
+    FP_EXTEND,
+
+    /// BITCAST - This operator converts between integer, vector and FP
+    /// values, as if the value was stored to memory with one type and loaded
+    /// from the same address with the other type (or equivalently for vector
+    /// format conversions, etc).  The source and result are required to have
+    /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
+    /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
+    /// getNode().
+    ///
+    /// This operator is subtly different from the bitcast instruction from
+    /// LLVM-IR since this node may change the bits in the register. For
+    /// example, this occurs on big-endian NEON and big-endian MSA where the
+    /// layout of the bits in the register depends on the vector type and this
+    /// operator acts as a shuffle operation for some vector type combinations.
+    BITCAST,
+
+    /// ADDRSPACECAST - This operator converts between pointers of different
+    /// address spaces.
+    ADDRSPACECAST,
+
+    /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
+    /// and truncation for half-precision (16 bit) floating numbers. These nodes
+    /// form a semi-softened interface for dealing with f16 (as an i16), which
+    /// is often a storage-only type but has native conversions.
+    FP16_TO_FP, FP_TO_FP16,
+
+    /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+    /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+    /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
+    /// floating point operations. These are inspired by libm.
+    FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+    FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+    FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
+    /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
+    /// values.
+    /// In the case where a single input is NaN, the non-NaN input is returned.
+    ///
+    /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
+    FMINNUM, FMAXNUM,
+    /// FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that
+    /// when a single input is NaN, NaN is returned.
+    FMINNAN, FMAXNAN,
+
+    /// FSINCOS - Compute both fsin and fcos as a single operation.
+    FSINCOS,
+
+    /// LOAD and STORE have token chains as their first operand, then the same
+    /// operands as an LLVM load/store instruction, then an offset node that
+    /// is added / subtracted from the base pointer to form the address (for
+    /// indexed memory ops).
+    LOAD, STORE,
+
+    /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+    /// to a specified boundary.  This node always has two return values: a new
+    /// stack pointer value and a chain. The first operand is the token chain,
+    /// the second is the number of bytes to allocate, and the third is the
+    /// alignment boundary.  The size is guaranteed to be a multiple of the
+    /// stack alignment, and the alignment is guaranteed to be bigger than the
+    /// stack alignment (if required) or 0 to get standard stack alignment.
+    DYNAMIC_STACKALLOC,
+
+    /// Control flow instructions.  These all have token chains.
+
+    /// BR - Unconditional branch.  The first operand is the chain
+    /// operand, the second is the MBB to branch to.
+    BR,
+
+    /// BRIND - Indirect branch.  The first operand is the chain, the second
+    /// is the value to branch to, which must be of the same type as the
+    /// target's pointer type.
+    BRIND,
+
+    /// BR_JT - Jumptable branch. The first operand is the chain, the second
+    /// is the jumptable index, the last one is the jumptable entry index.
+    BR_JT,
+
+    /// BRCOND - Conditional branch.  The first operand is the chain, the
+    /// second is the condition, the third is the block to branch to if the
+    /// condition is true.  If the type of the condition is not i1, then the
+    /// high bits must conform to getBooleanContents.
+    BRCOND,
+
+    /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
+    /// that the condition is represented as condition code, and two nodes to
+    /// compare, rather than as a combined SetCC node.  The operands in order
+    /// are chain, cc, lhs, rhs, block to branch to if condition is true.
+    BR_CC,
+
+    /// INLINEASM - Represents an inline asm block.  This node always has two
+    /// return values: a chain and a flag result.  The inputs are as follows:
+    ///   Operand #0  : Input chain.
+    ///   Operand #1  : a ExternalSymbolSDNode with a pointer to the asm string.
+    ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
+    ///   Operand #3  : HasSideEffect, IsAlignStack bits.
+    ///   After this, it is followed by a list of operands with this format:
+    ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
+    ///                     number of operands that follow, etc.  See InlineAsm.h.
+    ///     ... however many operands ...
+    ///   Operand #last: Optional, an incoming flag.
+    ///
+    /// The variable width operands are required to represent target addressing
+    /// modes as a single "operand", even though they may have multiple
+    /// SDOperands.
+    INLINEASM,
+
+    /// EH_LABEL - Represents a label in mid basic block used to track
+    /// locations needed for debug and exception handling tables.  These nodes
+    /// take a chain as input and return a chain.
+    EH_LABEL,
+
+    /// ANNOTATION_LABEL - Represents a mid basic block label used by
+    /// annotations. This should remain within the basic block and be ordered
+    /// with respect to other call instructions, but loads and stores may float
+    /// past it.
+    ANNOTATION_LABEL,
+
+    /// CATCHPAD - Represents a catchpad instruction.
+    CATCHPAD,
+
+    /// CATCHRET - Represents a return from a catch block funclet. Used for
+    /// MSVC compatible exception handling. Takes a chain operand and a
+    /// destination basic block operand.
+    CATCHRET,
+
+    /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
+    /// MSVC compatible exception handling. Takes only a chain operand.
+    CLEANUPRET,
+
+    /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
+    /// value, the same type as the pointer type for the system, and an output
+    /// chain.
+    STACKSAVE,
+
+    /// STACKRESTORE has two operands, an input chain and a pointer to restore
+    /// to. It returns an output chain.
+    STACKRESTORE,
+
+    /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
+    /// of a call sequence, and carry arbitrary information that target might
+    /// want to know.  The first operand is a chain, the rest are specified by
+    /// the target and not touched by the DAG optimizers.
+    /// Targets that may use stack to pass call arguments define additional
+    /// operands:
+    /// - size of the call frame part that must be set up within the
+    ///   CALLSEQ_START..CALLSEQ_END pair,
+    /// - part of the call frame prepared prior to CALLSEQ_START.
+    /// Both these parameters must be constants, their sum is the total call
+    /// frame size.
+    /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+    CALLSEQ_START,  // Beginning of a call sequence
+    CALLSEQ_END,    // End of a call sequence
+
+    /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+    /// and the alignment. It returns a pair of values: the vaarg value and a
+    /// new chain.
+    VAARG,
+
+    /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
+    /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+    /// source.
+    VACOPY,
+
+    /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
+    /// pointer, and a SRCVALUE.
+    VAEND, VASTART,
+
+    /// SRCVALUE - This is a node type that holds a Value* that is used to
+    /// make reference to a value in the LLVM IR.
+    SRCVALUE,
+
+    /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
+    /// reference metadata in the IR.
+    MDNODE_SDNODE,
+
+    /// PCMARKER - This corresponds to the pcmarker intrinsic.
+    PCMARKER,
+
+    /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+    /// It produces a chain and one i64 value. The only operand is a chain.
+    /// If i64 is not legal, the result will be expanded into smaller values.
+    /// Still, it returns an i64, so targets should set legality for i64.
+    /// The result is the content of the architecture-specific cycle
+    /// counter-like register (or other high accuracy low latency clock source).
+    READCYCLECOUNTER,
+
+    /// HANDLENODE node - Used as a handle for various purposes.
+    HANDLENODE,
+
+    /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
+    /// takes as input a token chain, the pointer to the trampoline, the pointer
+    /// to the nested function, the pointer to pass for the 'nest' parameter, a
+    /// SRCVALUE for the trampoline and another for the nested function
+    /// (allowing targets to access the original Function*).
+    /// It produces a token chain as output.
+    INIT_TRAMPOLINE,
+
+    /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
+    /// It takes a pointer to the trampoline and produces a (possibly) new
+    /// pointer to the same trampoline with platform-specific adjustments
+    /// applied.  The pointer it returns points to an executable block of code.
+    ADJUST_TRAMPOLINE,
+
+    /// TRAP - Trapping instruction
+    TRAP,
+
+    /// DEBUGTRAP - Trap intended to get the attention of a debugger.
+    DEBUGTRAP,
+
+    /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
+    /// is the chain.  The other operands are the address to prefetch,
+    /// read / write specifier, locality specifier and instruction / data cache
+    /// specifier.
+    PREFETCH,
+
+    /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
+    /// This corresponds to the fence instruction. It takes an input chain, and
+    /// two integer constants: an AtomicOrdering and a SynchronizationScope.
+    ATOMIC_FENCE,
+
+    /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
+    /// This corresponds to "load atomic" instruction.
+    ATOMIC_LOAD,
+
+    /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
+    /// This corresponds to "store atomic" instruction.
+    ATOMIC_STORE,
+
+    /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+    /// For double-word atomic operations:
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
+    ///                                          swapLo, swapHi)
+    /// This corresponds to the cmpxchg instruction.
+    ATOMIC_CMP_SWAP,
+
+    /// Val, Success, OUTCHAIN
+    ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
+    /// N.b. this is still a strong cmpxchg operation, so
+    /// Success == "Val == cmp".
+    ATOMIC_CMP_SWAP_WITH_SUCCESS,
+
+    /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+    /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+    /// For double-word atomic operations:
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
+    /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
+    /// These correspond to the atomicrmw instruction.
+    ATOMIC_SWAP,
+    ATOMIC_LOAD_ADD,
+    ATOMIC_LOAD_SUB,
+    ATOMIC_LOAD_AND,
+    ATOMIC_LOAD_CLR,
+    ATOMIC_LOAD_OR,
+    ATOMIC_LOAD_XOR,
+    ATOMIC_LOAD_NAND,
+    ATOMIC_LOAD_MIN,
+    ATOMIC_LOAD_MAX,
+    ATOMIC_LOAD_UMIN,
+    ATOMIC_LOAD_UMAX,
+
+    // Masked load and store - consecutive vector load and store operations
+    // with additional mask operand that prevents memory accesses to the
+    // masked-off lanes.
+    MLOAD, MSTORE,
+
+    // Masked gather and scatter - load and store operations for a vector of
+    // random addresses with additional mask operand that prevents memory
+    // accesses to the masked-off lanes.
+    MGATHER, MSCATTER,
+
+    /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
+    /// is the chain and the second operand is the alloca pointer.
+    LIFETIME_START, LIFETIME_END,
+
+    /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
+    /// beginning and end of a GC transition sequence, and carry arbitrary
+    /// information that the target might need for lowering.  The first operand is
+    /// a chain, the rest are specified by the target and not touched by the DAG
+    /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
+    /// nested.
+    GC_TRANSITION_START,
+    GC_TRANSITION_END,
+
+    /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
+    /// the most recent dynamic alloca. For most targets that would be 0, but
+    /// for some others (e.g. PowerPC, PowerPC64) it would be a compile-time
+    /// known nonzero constant. The only operand here is the chain.
+    GET_DYNAMIC_AREA_OFFSET,
+
+    /// Generic reduction nodes. These nodes represent horizontal vector
+    /// reduction operations, producing a scalar result.
+    /// The STRICT variants perform reductions in sequential order. The first
+    /// operand is an initial scalar accumulator value, and the second operand
+    /// is the vector to reduce.
+    VECREDUCE_STRICT_FADD, VECREDUCE_STRICT_FMUL,
+    /// These reductions are non-strict, and have a single vector operand.
+    VECREDUCE_FADD, VECREDUCE_FMUL,
+    VECREDUCE_ADD, VECREDUCE_MUL,
+    VECREDUCE_AND, VECREDUCE_OR, VECREDUCE_XOR,
+    VECREDUCE_SMAX, VECREDUCE_SMIN, VECREDUCE_UMAX, VECREDUCE_UMIN,
+    /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
+    VECREDUCE_FMAX, VECREDUCE_FMIN,
+
+    /// BUILTIN_OP_END - This must be the last enum value in this list.
+    /// The target-specific pre-isel opcode values start here.
+    BUILTIN_OP_END
+  };
+
+  /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
+  /// which do not reference a specific memory location should be less than
+  /// this value. Those that do must not be less than this value, and can
+  /// be used with SelectionDAG::getMemIntrinsicNode.
+  static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+400;
+
+  //===--------------------------------------------------------------------===//
+  /// MemIndexedMode enum - This enum defines the load / store indexed
+  /// addressing modes.
+  ///
+  /// UNINDEXED    "Normal" load / store. The effective address is already
+  ///              computed and is available in the base pointer. The offset
+  ///              operand is always undefined. In addition to producing a
+  ///              chain, an unindexed load produces one value (result of the
+  ///              load); an unindexed store does not produce a value.
+  ///
+  /// PRE_INC      Similar to the unindexed mode where the effective address is
+  /// PRE_DEC      the value of the base pointer plus / minus the offset.
+  ///              It considers the computation as being folded into the load /
+  ///              store operation (i.e. the load / store does the address
+  ///              computation as well as performing the memory transaction).
+  ///              The base operand is always undefined. In addition to
+  ///              producing a chain, pre-indexed load produces two values
+  ///              (result of the load and the result of the address
+  ///              computation); a pre-indexed store produces one value (result
+  ///              of the address computation).
+  ///
+  /// POST_INC     The effective address is the value of the base pointer. The
+  /// POST_DEC     value of the offset operand is then added to / subtracted
+  ///              from the base after memory transaction. In addition to
+  ///              producing a chain, post-indexed load produces two values
+  ///              (the result of the load and the result of the base +/- offset
+  ///              computation); a post-indexed store produces one value (the
+  ///              result of the base +/- offset computation).
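+  ///
+  /// As a rough sketch in C-like pseudocode, for a load with base pointer p
+  /// and offset n (p1 being the extra updated-address result produced by the
+  /// indexed modes):
+  ///
+  /// UNINDEXED    val = *p;
+  /// PRE_INC      p1 = p + n;  val = *p1;
+  /// POST_INC     val = *p;    p1 = p + n;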
+  enum MemIndexedMode {
+    UNINDEXED = 0,
+    PRE_INC,
+    PRE_DEC,
+    POST_INC,
+    POST_DEC
+  };
+
+  static const int LAST_INDEXED_MODE = POST_DEC + 1;
+
+  //===--------------------------------------------------------------------===//
+  /// LoadExtType enum - This enum defines the three variants of LOADEXT
+  /// (load with extension).
+  ///
+  /// SEXTLOAD loads the integer operand and sign extends it to a larger
+  ///          integer result type.
+  /// ZEXTLOAD loads the integer operand and zero extends it to a larger
+  ///          integer result type.
+  /// EXTLOAD  is used for two things: floating point extending loads and
+  ///          integer extending loads [the top bits are undefined].
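+  ///
+  /// For example, loading the i8 value 0xFF as an i32 yields 0xFFFFFFFF under
+  /// SEXTLOAD, 0x000000FF under ZEXTLOAD, and a value whose top 24 bits are
+  /// undefined under EXTLOAD (when it is used as an integer extending load).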
+  enum LoadExtType {
+    NON_EXTLOAD = 0,
+    EXTLOAD,
+    SEXTLOAD,
+    ZEXTLOAD
+  };
+
+  static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
+
+  NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
+
+  //===--------------------------------------------------------------------===//
+  /// ISD::CondCode enum - These are ordered carefully to make the bitfields
+  /// below work out, when considering SETFALSE (something that never exists
+  /// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
+  /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
+  /// to.  If the "N" column is 1, the result of the comparison is undefined if
+  /// the input is a NAN.
+  ///
+  /// All of these (except for the 'always folded ops') should be handled for
+  /// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
+  /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
+  ///
+  /// Note that these are laid out in a specific order to allow bit-twiddling
+  /// to transform conditions.
+  enum CondCode {
+    // Opcode          N U L G E       Intuitive operation
+    SETFALSE,      //    0 0 0 0       Always false (always folded)
+    SETOEQ,        //    0 0 0 1       True if ordered and equal
+    SETOGT,        //    0 0 1 0       True if ordered and greater than
+    SETOGE,        //    0 0 1 1       True if ordered and greater than or equal
+    SETOLT,        //    0 1 0 0       True if ordered and less than
+    SETOLE,        //    0 1 0 1       True if ordered and less than or equal
+    SETONE,        //    0 1 1 0       True if ordered and operands are unequal
+    SETO,          //    0 1 1 1       True if ordered (no nans)
+    SETUO,         //    1 0 0 0       True if unordered: isnan(X) | isnan(Y)
+    SETUEQ,        //    1 0 0 1       True if unordered or equal
+    SETUGT,        //    1 0 1 0       True if unordered or greater than
+    SETUGE,        //    1 0 1 1       True if unordered, greater than, or equal
+    SETULT,        //    1 1 0 0       True if unordered or less than
+    SETULE,        //    1 1 0 1       True if unordered, less than, or equal
+    SETUNE,        //    1 1 1 0       True if unordered or not equal
+    SETTRUE,       //    1 1 1 1       Always true (always folded)
+    // Don't care operations: undefined if the input is a nan.
+    SETFALSE2,     //  1 X 0 0 0       Always false (always folded)
+    SETEQ,         //  1 X 0 0 1       True if equal
+    SETGT,         //  1 X 0 1 0       True if greater than
+    SETGE,         //  1 X 0 1 1       True if greater than or equal
+    SETLT,         //  1 X 1 0 0       True if less than
+    SETLE,         //  1 X 1 0 1       True if less than or equal
+    SETNE,         //  1 X 1 1 0       True if not equal
+    SETTRUE2,      //  1 X 1 1 1       Always true (always folded)
+
+    SETCC_INVALID       // Marker value.
+  };
+
+  /// Return true if this is a setcc instruction that performs a signed
+  /// comparison when used with integer operands.
+  inline bool isSignedIntSetCC(CondCode Code) {
+    return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
+  }
+
+  /// Return true if this is a setcc instruction that performs an unsigned
+  /// comparison when used with integer operands.
+  inline bool isUnsignedIntSetCC(CondCode Code) {
+    return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
+  }
+
+  /// Return true if the specified condition returns true if the two operands to
+  /// the condition are equal. Note that if one of the two operands is a NaN,
+  /// this value is meaningless.
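+  /// For example, isTrueWhenEqual(SETUGE) is true, since bit 0 (the "E" bit)
+  /// is set in its encoding, while isTrueWhenEqual(SETUGT) is false.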
+  inline bool isTrueWhenEqual(CondCode Cond) {
+    return ((int)Cond & 1) != 0;
+  }
+
+  /// This function returns 0 if the condition is always false if an operand is
+  /// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
+  /// the condition is undefined if the operand is a NaN.
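+  /// For example, SETUGT is encoded as 1010b, so getUnorderedFlavor(SETUGT)
+  /// is 1 (always true on a NaN), while SETGT is encoded as 10010b, giving 2
+  /// (undefined on a NaN), matching the "N" column in the table above.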
+  inline unsigned getUnorderedFlavor(CondCode Cond) {
+    return ((int)Cond >> 3) & 3;
+  }
+
+  /// Return the operation corresponding to !(X op Y), where 'op' is a valid
+  /// SetCC operation.
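+  /// For example, getSetCCInverse(SETULE, /*isInteger=*/false) returns SETOGT.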
+  CondCode getSetCCInverse(CondCode Operation, bool isInteger);
+
+  /// Return the operation corresponding to (Y op X) when given the operation
+  /// for (X op Y).
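+  /// For example, getSetCCSwappedOperands(SETLT) returns SETGT.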
+  CondCode getSetCCSwappedOperands(CondCode Operation);
+
+  /// Return the result of a logical OR between different comparisons of
+  /// identical values: ((X op1 Y) | (X op2 Y)). This function returns
+  /// SETCC_INVALID if it is not possible to represent the resultant comparison.
+  CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+  /// Return the result of a logical AND between different comparisons of
+  /// identical values: ((X op1 Y) & (X op2 Y)). This function returns
+  /// SETCC_INVALID if it is not possible to represent the resultant comparison.
+  CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+} // end llvm::ISD namespace
+
+} // end llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h b/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h
new file mode 100644
index 0000000..597d684
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/IntrinsicLowering.h
@@ -0,0 +1,54 @@
+//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IntrinsicLowering interface.  This interface allows
+// addition of domain-specific or front-end specific intrinsics to LLVM without
+// having to modify all of the C backend or interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
+#define LLVM_CODEGEN_INTRINSICLOWERING_H
+
+#include "llvm/IR/Intrinsics.h"
+
+namespace llvm {
+class CallInst;
+class Module;
+class DataLayout;
+
+class IntrinsicLowering {
+  const DataLayout &DL;
+
+  bool Warned;
+
+public:
+  explicit IntrinsicLowering(const DataLayout &DL) : DL(DL), Warned(false) {}
+
+  /// Add all of the prototypes that might be needed by an intrinsic lowering
+  /// implementation to be inserted into the module specified.
+  void AddPrototypes(Module &M);
+
+  /// Replace a call to the specified intrinsic function.
+  /// If an intrinsic function must be implemented by the code generator
+  /// (such as va_start), this function should print a message and abort.
+  ///
+  /// Otherwise, if an intrinsic function call can be lowered, the code to
+  /// implement it (often a call to a non-intrinsic function) is inserted
+  /// _after_ the call instruction and the call is deleted. The caller must
+  /// be capable of handling this kind of change.
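+  ///
+  /// A minimal usage sketch (where M names the module being processed and CI
+  /// a call to a lowerable intrinsic within it):
+  /// \code
+  ///   IntrinsicLowering IL(M.getDataLayout());
+  ///   IL.AddPrototypes(M);
+  ///   IL.LowerIntrinsicCall(CI); // CI is deleted; lowered code follows it
+  /// \endcode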
+  void LowerIntrinsicCall(CallInst *CI);
+
+  /// Try to replace a call instruction with a call to a bswap intrinsic. Return
+  /// false if the call is not a simple integer bswap.
+  static bool LowerToByteSwap(CallInst *CI);
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
new file mode 100644
index 0000000..988e6d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -0,0 +1,98 @@
+//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LatencyPriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using latency information to
+// reduce the length of the critical path through the basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/ScheduleDAG.h"
+
+namespace llvm {
+  class LatencyPriorityQueue;
+
+  /// Sorting functions for the Available queue.
+  struct latency_sort {
+    LatencyPriorityQueue *PQ;
+    explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
+
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
+
+  class LatencyPriorityQueue : public SchedulingPriorityQueue {
+    // SUnits - The SUnits for the current graph.
+    std::vector<SUnit> *SUnits;
+
+    /// NumNodesSolelyBlocking - This vector contains, for every node in the
+    /// Queue, the number of nodes that the node is the sole unscheduled
+    /// predecessor for.  This is used as a tie-breaker heuristic for better
+    /// mobility.
+    std::vector<unsigned> NumNodesSolelyBlocking;
+
+    /// Queue - The queue.
+    std::vector<SUnit*> Queue;
+    latency_sort Picker;
+
+  public:
+    LatencyPriorityQueue() : Picker(this) {
+    }
+
+    bool isBottomUp() const override { return false; }
+
+    void initNodes(std::vector<SUnit> &sunits) override {
+      SUnits = &sunits;
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void addNode(const SUnit *SU) override {
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void updateNode(const SUnit *SU) override {
+    }
+
+    void releaseState() override {
+      SUnits = nullptr;
+    }
+
+    unsigned getLatency(unsigned NodeNum) const {
+      assert(NodeNum < (*SUnits).size());
+      return (*SUnits)[NodeNum].getHeight();
+    }
+
+    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+      assert(NodeNum < NumNodesSolelyBlocking.size());
+      return NumNodesSolelyBlocking[NodeNum];
+    }
+
+    bool empty() const override { return Queue.empty(); }
+
+    void push(SUnit *U) override;
+
+    SUnit *pop() override;
+
+    void remove(SUnit *SU) override;
+
+    // scheduledNode - As nodes are scheduled, we look to see if there are any
+    // successor nodes that have a single unscheduled predecessor.  If so, that
+    // single predecessor has a higher priority, since scheduling it will make
+    // the node available.
+    void scheduledNode(SUnit *Node) override;
+
+private:
+    void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
+    SUnit *getSingleUnscheduledPred(SUnit *SU);
+  };
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
new file mode 100644
index 0000000..848ee1d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LazyMachineBlockFrequencyInfo.h
@@ -0,0 +1,76 @@
+///===- LazyMachineBlockFrequencyInfo.h - Lazy Block Frequency -*- C++ -*--===//
+///
+///                     The LLVM Compiler Infrastructure
+///
+/// This file is distributed under the University of Illinois Open Source
+/// License. See LICENSE.TXT for details.
+///
+///===---------------------------------------------------------------------===//
+/// \file
+/// This is an alternative analysis pass to MachineBlockFrequencyInfo.  The
+/// difference is that with this pass the block frequencies are not computed
+/// when the analysis pass is executed but rather when the BFI result is
+/// explicitly requested by the analysis client.
+///
+///===---------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYMACHINEBLOCKFREQUENCYINFO_H
+#define LLVM_ANALYSIS_LAZYMACHINEBLOCKFREQUENCYINFO_H
+
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+
+namespace llvm {
+/// \brief This is an alternative analysis pass to MachineBlockFrequencyInfo.
+/// The difference is that with this pass, the block frequencies are not
+/// computed when the analysis pass is executed but rather when the BFI result
+/// is explicitly requested by the analysis client.
+///
+/// This works by querying whether MBFI is already available, and otherwise
+/// generating MBFI on the fly.  In this case the passes required for (LI, DT)
+/// are also queried before being computed on the fly.
+///
+/// Note that it is expected that we wouldn't need this functionality for the
+/// new PM since with the new PM, analyses are executed on demand.
+
+class LazyMachineBlockFrequencyInfoPass : public MachineFunctionPass {
+private:
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineBlockFrequencyInfo> OwnedMBFI;
+
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineLoopInfo> OwnedMLI;
+
+  /// If generated on the fly, this owns the instance.
+  mutable std::unique_ptr<MachineDominatorTree> OwnedMDT;
+
+  /// The function.
+  MachineFunction *MF = nullptr;
+
+  /// \brief Calculate MBFI and all other analyses that are not yet available
+  /// but are required by BFI.
+  MachineBlockFrequencyInfo &calculateIfNotAvailable() const;
+
+public:
+  static char ID;
+
+  LazyMachineBlockFrequencyInfoPass();
+
+  /// \brief Compute and return the block frequencies.
+  MachineBlockFrequencyInfo &getBFI() { return calculateIfNotAvailable(); }
+
+  /// \brief Compute and return the block frequencies.
+  const MachineBlockFrequencyInfo &getBFI() const {
+    return calculateIfNotAvailable();
+  }
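+
+  /// A client pass would typically declare the dependency and then pull the
+  /// result only on the paths that need it; a sketch, assuming a
+  /// MachineFunctionPass client:
+  /// \code
+  ///   void getAnalysisUsage(AnalysisUsage &AU) const override {
+  ///     AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
+  ///     MachineFunctionPass::getAnalysisUsage(AU);
+  ///   }
+  ///   // Later, only when block frequencies are actually consulted:
+  ///   MachineBlockFrequencyInfo &MBFI =
+  ///       getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI();
+  /// \endcode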
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+  void releaseMemory() override;
+  void print(raw_ostream &OS, const Module *M) const override;
+};
+}
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h b/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h
new file mode 100644
index 0000000..3ba5034
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LexicalScopes.h
@@ -0,0 +1,258 @@
+//===- LexicalScopes.h - Collecting lexical scope info ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LexicalScopes analysis.
+//
+// This pass collects lexical scope information and maps machine instructions
+// to respective lexical scopes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
+#define LLVM_CODEGEN_LEXICALSCOPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include <cassert>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MDNode;
+
+//===----------------------------------------------------------------------===//
+/// InsnRange - This is used to track a range of instructions with an
+/// identical lexical scope.
+///
+using InsnRange = std::pair<const MachineInstr *, const MachineInstr *>;
+
+//===----------------------------------------------------------------------===//
+/// LexicalScope - This class is used to track scope information.
+///
+class LexicalScope {
+public:
+  LexicalScope(LexicalScope *P, const DILocalScope *D, const DILocation *I,
+               bool A)
+      : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A) {
+    assert(D);
+    assert(D->getSubprogram()->getUnit()->getEmissionKind() !=
+           DICompileUnit::NoDebug &&
+           "Don't build lexical scopes for non-debug locations");
+    assert(D->isResolved() && "Expected resolved node");
+    assert((!I || I->isResolved()) && "Expected resolved node");
+    if (Parent)
+      Parent->addChild(this);
+  }
+
+  // Accessors.
+  LexicalScope *getParent() const { return Parent; }
+  const MDNode *getDesc() const { return Desc; }
+  const DILocation *getInlinedAt() const { return InlinedAtLocation; }
+  const DILocalScope *getScopeNode() const { return Desc; }
+  bool isAbstractScope() const { return AbstractScope; }
+  SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
+  SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
+
+  /// addChild - Add a child scope.
+  void addChild(LexicalScope *S) { Children.push_back(S); }
+
+  /// openInsnRange - This scope covers instruction range starting from MI.
+  void openInsnRange(const MachineInstr *MI) {
+    if (!FirstInsn)
+      FirstInsn = MI;
+
+    if (Parent)
+      Parent->openInsnRange(MI);
+  }
+
+  /// extendInsnRange - Extend the current instruction range covered by
+  /// this scope.
+  void extendInsnRange(const MachineInstr *MI) {
+    assert(FirstInsn && "MI Range is not open!");
+    LastInsn = MI;
+    if (Parent)
+      Parent->extendInsnRange(MI);
+  }
+
+  /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
+  /// until now. This is used when a new scope is encountered while walking
+  /// machine instructions.
+  void closeInsnRange(LexicalScope *NewScope = nullptr) {
+    assert(LastInsn && "Last insn missing!");
+    Ranges.push_back(InsnRange(FirstInsn, LastInsn));
+    FirstInsn = nullptr;
+    LastInsn = nullptr;
+    // If Parent dominates NewScope then do not close Parent's instruction
+    // range.
+    if (Parent && (!NewScope || !Parent->dominates(NewScope)))
+      Parent->closeInsnRange(NewScope);
+  }
+
+  /// dominates - Return true if current scope dominates given lexical scope.
+  bool dominates(const LexicalScope *S) const {
+    if (S == this)
+      return true;
+    if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
+      return true;
+    return false;
+  }
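+
+  // For example, with a pre-order DFS numbering where the function scope has
+  // (DFSIn=1, DFSOut=6) and a nested block scope has (DFSIn=2, DFSOut=3), the
+  // function scope dominates the block scope because [2,3] nests strictly
+  // inside [1,6].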
+
+  // Depth First Search support to walk and manipulate LexicalScope hierarchy.
+  unsigned getDFSOut() const { return DFSOut; }
+  void setDFSOut(unsigned O) { DFSOut = O; }
+  unsigned getDFSIn() const { return DFSIn; }
+  void setDFSIn(unsigned I) { DFSIn = I; }
+
+  /// dump - print lexical scope.
+  void dump(unsigned Indent = 0) const;
+
+private:
+  LexicalScope *Parent;                        // Parent to this scope.
+  const DILocalScope *Desc;                    // Debug info descriptor.
+  const DILocation *InlinedAtLocation;         // Location at which this
+                                               // scope is inlined.
+  bool AbstractScope;                          // Abstract Scope
+  SmallVector<LexicalScope *, 4> Children;     // Scopes defined in scope.
+                                               // Contents not owned.
+  SmallVector<InsnRange, 4> Ranges;
+
+  const MachineInstr *LastInsn = nullptr;  // Last instruction of this scope.
+  const MachineInstr *FirstInsn = nullptr; // First instruction of this scope.
+  unsigned DFSIn = 0; // In & Out Depth use to determine scope nesting.
+  unsigned DFSOut = 0;
+};
+
+//===----------------------------------------------------------------------===//
+/// LexicalScopes - This class provides an interface to collect and use
+/// lexical scoping information from machine instructions.
+///
+class LexicalScopes {
+public:
+  LexicalScopes() = default;
+
+  /// initialize - Scan the machine function and construct the lexical scope
+  /// nest; resets the instance if necessary.
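+  ///
+  /// A typical use, sketched (with MF naming the MachineFunction under
+  /// analysis):
+  /// \code
+  ///   LexicalScopes LScopes;
+  ///   LScopes.initialize(MF);
+  ///   if (LexicalScope *FnScope = LScopes.getCurrentFunctionScope())
+  ///     /* walk FnScope->getChildren() recursively */;
+  /// \endcode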
+  void initialize(const MachineFunction &);
+
+  /// releaseMemory - release memory.
+  void reset();
+
+  /// empty - Return true if there is any lexical scope information available.
+  bool empty() { return CurrentFnLexicalScope == nullptr; }
+
+  /// getCurrentFunctionScope - Return lexical scope for the current function.
+  LexicalScope *getCurrentFunctionScope() const {
+    return CurrentFnLexicalScope;
+  }
+
+  /// getMachineBasicBlocks - Populate the given set with the machine basic
+  /// blocks that have machine instructions belonging to the lexical scope
+  /// identified by DebugLoc.
+  void getMachineBasicBlocks(const DILocation *DL,
+                             SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);
+
+  /// dominates - Return true if DebugLoc's lexical scope dominates at least one
+  /// machine instruction's lexical scope in a given machine basic block.
+  bool dominates(const DILocation *DL, MachineBasicBlock *MBB);
+
+  /// findLexicalScope - Find lexical scope, either regular or inlined, for the
+  /// given DebugLoc. Return NULL if not found.
+  LexicalScope *findLexicalScope(const DILocation *DL);
+
+  /// getAbstractScopesList - Return a reference to list of abstract scopes.
+  ArrayRef<LexicalScope *> getAbstractScopesList() const {
+    return AbstractScopesList;
+  }
+
+  /// findAbstractScope - Find an abstract scope or return null.
+  LexicalScope *findAbstractScope(const DILocalScope *N) {
+    auto I = AbstractScopeMap.find(N);
+    return I != AbstractScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// findInlinedScope - Find an inlined scope for the given scope/inlined-at.
+  LexicalScope *findInlinedScope(const DILocalScope *N, const DILocation *IA) {
+    auto I = InlinedLexicalScopeMap.find(std::make_pair(N, IA));
+    return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// findLexicalScope - Find regular lexical scope or return null.
+  LexicalScope *findLexicalScope(const DILocalScope *N) {
+    auto I = LexicalScopeMap.find(N);
+    return I != LexicalScopeMap.end() ? &I->second : nullptr;
+  }
+
+  /// dump - Print data structures to dbgs().
+  void dump() const;
+
+  /// getOrCreateAbstractScope - Find or create an abstract lexical scope.
+  LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);
+
+private:
+  /// getOrCreateLexicalScope - Find lexical scope for the given Scope/IA. If
+  /// not available then create new lexical scope.
+  LexicalScope *getOrCreateLexicalScope(const DILocalScope *Scope,
+                                        const DILocation *IA = nullptr);
+  LexicalScope *getOrCreateLexicalScope(const DILocation *DL) {
+    return DL ? getOrCreateLexicalScope(DL->getScope(), DL->getInlinedAt())
+              : nullptr;
+  }
+
+  /// getOrCreateRegularScope - Find or create a regular lexical scope.
+  LexicalScope *getOrCreateRegularScope(const DILocalScope *Scope);
+
+  /// getOrCreateInlinedScope - Find or create an inlined lexical scope.
+  LexicalScope *getOrCreateInlinedScope(const DILocalScope *Scope,
+                                        const DILocation *InlinedAt);
+
+  /// extractLexicalScopes - Extract instruction ranges for each lexical scope
+  /// of the given machine function.
+  void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
+                            DenseMap<const MachineInstr *, LexicalScope *> &M);
+  void constructScopeNest(LexicalScope *Scope);
+  void
+  assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
+                          DenseMap<const MachineInstr *, LexicalScope *> &M);
+
+  const MachineFunction *MF = nullptr;
+
+  /// LexicalScopeMap - Tracks the scopes in the current function.
+  // Use an unordered_map to ensure value pointer validity over insertion.
+  std::unordered_map<const DILocalScope *, LexicalScope> LexicalScopeMap;
+
+  /// InlinedLexicalScopeMap - Tracks inlined function scopes in current
+  /// function.
+  std::unordered_map<std::pair<const DILocalScope *, const DILocation *>,
+                     LexicalScope,
+                     pair_hash<const DILocalScope *, const DILocation *>>
+      InlinedLexicalScopeMap;
+
+  /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
+  // Use an unordered_map to ensure value pointer validity over insertion.
+  std::unordered_map<const DILocalScope *, LexicalScope> AbstractScopeMap;
+
+  /// AbstractScopesList - Tracks abstract scopes constructed while processing
+  /// a function.
+  SmallVector<LexicalScope *, 4> AbstractScopesList;
+
+  /// CurrentFnLexicalScope - Top level scope for the current function.
+  ///
+  LexicalScope *CurrentFnLexicalScope = nullptr;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LEXICALSCOPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
new file mode 100644
index 0000000..c3046da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -0,0 +1,38 @@
+//===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all assembler writer related passes for tools like
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include <cstdlib>
+
+namespace {
+  struct ForceAsmWriterLinking {
+    ForceAsmWriterLinking() {
+      // We must reference the plug-ins in such a way that compilers will not
+      // delete them as dead code, even with whole program optimization,
+      // while remaining effectively a NO-OP. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      llvm::linkOcamlGCPrinter();
+      llvm::linkErlangGCPrinter();
+
+    }
+  } ForceAsmWriterLinking; // Force link by creating a global definition.
+}
+
+#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h b/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h
new file mode 100644
index 0000000..fee131e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -0,0 +1,59 @@
+//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all codegen related passes for tools like lli and
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cstdlib>
+
+namespace {
+  struct ForceCodegenLinking {
+    ForceCodegenLinking() {
+      // We must reference the passes in such a way that compilers will not
+      // delete them as dead code, even with whole program optimization,
+      // while remaining effectively a NO-OP. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+      if (std::getenv("bar") != (char*) -1)
+        return;
+
+      (void) llvm::createFastRegisterAllocator();
+      (void) llvm::createBasicRegisterAllocator();
+      (void) llvm::createGreedyRegisterAllocator();
+      (void) llvm::createDefaultPBQPRegisterAllocator();
+
+      llvm::linkCoreCLRGC();
+      llvm::linkOcamlGC();
+      llvm::linkErlangGC();
+      llvm::linkShadowStackGC();
+      llvm::linkStatepointExampleGC();
+
+      (void) llvm::createBURRListDAGScheduler(nullptr,
+                                              llvm::CodeGenOpt::Default);
+      (void) llvm::createSourceListDAGScheduler(nullptr,
+                                                llvm::CodeGenOpt::Default);
+      (void) llvm::createHybridListDAGScheduler(nullptr,
+                                                llvm::CodeGenOpt::Default);
+      (void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+      (void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
+      (void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+
+    }
+  } ForceCodegenLinking; // Force link by creating a global definition.
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
new file mode 100644
index 0000000..f4fa872
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveInterval.h
@@ -0,0 +1,943 @@
+//===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveRange and LiveInterval classes.  Given some
+// numbering of each of the machine instructions, an interval [i, j) is said to be a
+// live range for register v if there is no instruction with number j' >= j
+// such that v is live at j' and there is no instruction with number i' < i such
+// that v is live at i'. In this implementation ranges can have holes,
+// i.e. a range might look like [1,20), [50,65), [1000,1001).  Each
+// individual segment is represented as an instance of LiveRange::Segment,
+// and the whole range is represented as an instance of LiveRange.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
+#define LLVM_CODEGEN_LIVEINTERVAL_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <memory>
+#include <set>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+
+  class CoalescerPair;
+  class LiveIntervals;
+  class MachineRegisterInfo;
+  class raw_ostream;
+
+  /// VNInfo - Value Number Information.
+  /// This class holds information about a machine-level value, including its
+  /// definition and use points.
+  ///
+  class VNInfo {
+  public:
+    using Allocator = BumpPtrAllocator;
+
+    /// The ID number of this value.
+    unsigned id;
+
+    /// The index of the defining instruction.
+    SlotIndex def;
+
+    /// VNInfo constructor.
+    VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}
+
+    /// VNInfo constructor, copies values from orig, except for the value number.
+    VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}
+
+    /// Copy from the parameter into this VNInfo.
+    void copyFrom(VNInfo &src) {
+      def = src.def;
+    }
+
+    /// Returns true if this value is defined by a PHI instruction (or was,
+    /// PHI instructions may have been eliminated).
+    /// PHI-defs begin at a block boundary, all other defs begin at register or
+    /// EC slots.
+    bool isPHIDef() const { return def.isBlock(); }
+
+    /// Returns true if this value is unused.
+    bool isUnused() const { return !def.isValid(); }
+
+    /// Mark this value as unused.
+    void markUnused() { def = SlotIndex(); }
+  };
+
+  /// Result of a LiveRange query. This class hides the implementation details
+  /// of live ranges, and it should be used as the primary interface for
+  /// examining live ranges around instructions.
+  class LiveQueryResult {
+    VNInfo *const EarlyVal;
+    VNInfo *const LateVal;
+    const SlotIndex EndPoint;
+    const bool Kill;
+
+  public:
+    LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint,
+                    bool Kill)
+      : EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill)
+    {}
+
+    /// Return the value that is live-in to the instruction. This is the value
+    /// that will be read by the instruction's use operands. Return NULL if no
+    /// value is live-in.
+    VNInfo *valueIn() const {
+      return EarlyVal;
+    }
+
+    /// Return true if the live-in value is killed by this instruction. This
+    /// means that either the live range ends at the instruction, or it changes
+    /// value.
+    bool isKill() const {
+      return Kill;
+    }
+
+    /// Return true if this instruction has a dead def.
+    bool isDeadDef() const {
+      return EndPoint.isDead();
+    }
+
+    /// Return the value leaving the instruction, if any. This can be a
+    /// live-through value, or a live def. A dead def returns NULL.
+    VNInfo *valueOut() const {
+      return isDeadDef() ? nullptr : LateVal;
+    }
+
+    /// Returns the value alive at the end of the instruction, if any. This can
+    /// be a live-through value, a live def or a dead def.
+    VNInfo *valueOutOrDead() const {
+      return LateVal;
+    }
+
+    /// Return the value defined by this instruction, if any. This includes
+    /// dead defs, it is the value created by the instruction's def operands.
+    VNInfo *valueDefined() const {
+      return EarlyVal == LateVal ? nullptr : LateVal;
+    }
+
+    /// Return the end point of the last live range segment to interact with
+    /// the instruction, if any.
+    ///
+    /// The end point is an invalid SlotIndex only if the live range doesn't
+    /// intersect the instruction at all.
+    ///
+    /// The end point may be at or past the end of the instruction's basic
+    /// block. That means the value was live out of the block.
+    SlotIndex endPoint() const {
+      return EndPoint;
+    }
+  };
+
+  /// This class represents the liveness of a register, stack slot, etc.
+  /// It manages an ordered list of Segment objects.
+  /// The Segments are organized in a static single assignment form: At places
+  /// where a new value is defined or different values reach a CFG join a new
+  /// segment with a new value number is used.
+  class LiveRange {
+  public:
+    /// This represents a simple continuous liveness interval for a value.
+    /// The start point is inclusive, the end point exclusive. These intervals
+    /// are rendered as [start,end).
+    struct Segment {
+      SlotIndex start;  // Start point of the interval (inclusive)
+      SlotIndex end;    // End point of the interval (exclusive)
+      VNInfo *valno = nullptr; // identifier for the value contained in this
+                               // segment.
+
+      Segment() = default;
+
+      Segment(SlotIndex S, SlotIndex E, VNInfo *V)
+        : start(S), end(E), valno(V) {
+        assert(S < E && "Cannot create empty or backwards segment");
+      }
+
+      /// Return true if the index is covered by this segment.
+      bool contains(SlotIndex I) const {
+        return start <= I && I < end;
+      }
+
+      /// Return true if the given interval, [S, E), is covered by this segment.
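+      /// For example, the segment [4,12) covers [4,12) and [5,9), but not
+      /// [3,6) or [8,13).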
+      bool containsInterval(SlotIndex S, SlotIndex E) const {
+        assert((S < E) && "Backwards interval?");
+        return (start <= S && S < end) && (start < E && E <= end);
+      }
+
+      bool operator<(const Segment &Other) const {
+        return std::tie(start, end) < std::tie(Other.start, Other.end);
+      }
+      bool operator==(const Segment &Other) const {
+        return start == Other.start && end == Other.end;
+      }
+
+      void dump() const;
+    };
+
+    using Segments = SmallVector<Segment, 2>;
+    using VNInfoList = SmallVector<VNInfo *, 2>;
+
+    Segments segments;   // the liveness segments
+    VNInfoList valnos;   // value#'s
+
+    // The segment set is used temporarily to accelerate initial computation
+    // of live ranges of physical registers in computeRegUnitRange.
+    // After that the set is flushed to the segment vector and deleted.
+    using SegmentSet = std::set<Segment>;
+    std::unique_ptr<SegmentSet> segmentSet;
+
+    using iterator = Segments::iterator;
+    using const_iterator = Segments::const_iterator;
+
+    iterator begin() { return segments.begin(); }
+    iterator end()   { return segments.end(); }
+
+    const_iterator begin() const { return segments.begin(); }
+    const_iterator end() const  { return segments.end(); }
+
+    using vni_iterator = VNInfoList::iterator;
+    using const_vni_iterator = VNInfoList::const_iterator;
+
+    vni_iterator vni_begin() { return valnos.begin(); }
+    vni_iterator vni_end()   { return valnos.end(); }
+
+    const_vni_iterator vni_begin() const { return valnos.begin(); }
+    const_vni_iterator vni_end() const   { return valnos.end(); }
+
+    /// Constructs a new LiveRange object.
+    LiveRange(bool UseSegmentSet = false)
+        : segmentSet(UseSegmentSet ? llvm::make_unique<SegmentSet>()
+                                   : nullptr) {}
+
+    /// Constructs a new LiveRange object by copying segments and valnos from
+    /// another LiveRange.
+    LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) {
+      assert(Other.segmentSet == nullptr &&
+             "Copying of LiveRanges with active SegmentSets is not supported");
+      assign(Other, Allocator);
+    }
+
+    /// Copies value numbers and live segments from \p Other into this range.
+    void assign(const LiveRange &Other, BumpPtrAllocator &Allocator) {
+      if (this == &Other)
+        return;
+
+      assert(Other.segmentSet == nullptr &&
+             "Copying of LiveRanges with active SegmentSets is not supported");
+      // Duplicate valnos.
+      for (const VNInfo *VNI : Other.valnos)
+        createValueCopy(VNI, Allocator);
+      // Now we can copy segments and remap their valnos.
+      for (const Segment &S : Other.segments)
+        segments.push_back(Segment(S.start, S.end, valnos[S.valno->id]));
+    }
+
+    /// advanceTo - Advance the specified iterator to point to the Segment
+    /// containing the specified position, or end() if the position is past the
+    /// end of the range.  If no Segment contains this position, but the
+    /// position is in a hole, this method returns an iterator pointing to the
+    /// Segment immediately after the hole.
+    iterator advanceTo(iterator I, SlotIndex Pos) {
+      assert(I != end());
+      if (Pos >= endIndex())
+        return end();
+      while (I->end <= Pos) ++I;
+      return I;
+    }
+
+    const_iterator advanceTo(const_iterator I, SlotIndex Pos) const {
+      assert(I != end());
+      if (Pos >= endIndex())
+        return end();
+      while (I->end <= Pos) ++I;
+      return I;
+    }
+
+    /// find - Return an iterator pointing to the first segment that ends after
+    /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
+    /// when searching large ranges.
+    ///
+    /// If Pos is contained in a Segment, that segment is returned.
+    /// If Pos is in a hole, the following Segment is returned.
+    /// If Pos is beyond endIndex, end() is returned.
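+    ///
+    /// For example, in a range with segments [4,10) and [20,30), find(8)
+    /// returns the [4,10) segment, find(12) returns the [20,30) segment
+    /// (12 falls in the hole), and find(35) returns end().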
+    iterator find(SlotIndex Pos);
+
+    const_iterator find(SlotIndex Pos) const {
+      return const_cast<LiveRange*>(this)->find(Pos);
+    }
+
+    void clear() {
+      valnos.clear();
+      segments.clear();
+    }
+
+    size_t size() const {
+      return segments.size();
+    }
+
+    bool hasAtLeastOneValue() const { return !valnos.empty(); }
+
+    bool containsOneValue() const { return valnos.size() == 1; }
+
+    unsigned getNumValNums() const { return (unsigned)valnos.size(); }
+
+    /// getValNumInfo - Returns pointer to the specified val#.
+    ///
+    inline VNInfo *getValNumInfo(unsigned ValNo) {
+      return valnos[ValNo];
+    }
+    inline const VNInfo *getValNumInfo(unsigned ValNo) const {
+      return valnos[ValNo];
+    }
+
+    /// containsValue - Returns true if VNI belongs to this range.
+    bool containsValue(const VNInfo *VNI) const {
+      return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
+    }
+
+    /// getNextValue - Create a new value number and return it.  MIIdx specifies
+    /// the instruction that defines the value number.
+    VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
+      valnos.push_back(VNI);
+      return VNI;
+    }
+
+    /// createDeadDef - Make sure the range has a value defined at Def.
+    /// If one already exists, return it. Otherwise allocate a new value and
+    /// add liveness for a dead def.
+    VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+
+    /// Create a def of value @p VNI. Return @p VNI. If there already exists
+    /// a definition at VNI->def, the value defined there must be @p VNI.
+    VNInfo *createDeadDef(VNInfo *VNI);
+
+    /// Create a copy of the given value. The new value will be identical except
+    /// for the Value number.
+    VNInfo *createValueCopy(const VNInfo *orig,
+                            VNInfo::Allocator &VNInfoAllocator) {
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
+      valnos.push_back(VNI);
+      return VNI;
+    }
+
+    /// RenumberValues - Renumber all values in order of appearance and remove
+    /// unused values.
+    void RenumberValues();
+
+    /// MergeValueNumberInto - This method is called when two value numbers
+    /// are found to be equivalent.  This eliminates V1, replacing all
+    /// segments with the V1 value number with the V2 value number.  This can
+    /// cause merging of V1/V2 value numbers and compaction of the value space.
+    VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
+
+    /// Merge all of the live segments of a specific val# in RHS into this live
+    /// range as the specified value number. The segments in RHS are allowed
+    /// to overlap with segments in the current range; it will replace the
+    /// value numbers of the overlapped live segments with the specified value
+    /// number.
+    void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo);
+
+    /// MergeValueInAsValue - Merge all of the segments of a specific val#
+    /// in RHS into this live range as the specified value number.
+    /// The segments in RHS are allowed to overlap with segments in the
+    /// current range, but only if the overlapping segments have the
+    /// specified value number.
+    void MergeValueInAsValue(const LiveRange &RHS,
+                             const VNInfo *RHSValNo, VNInfo *LHSValNo);
+
+    bool empty() const { return segments.empty(); }
+
+    /// beginIndex - Return the lowest numbered slot covered.
+    SlotIndex beginIndex() const {
+      assert(!empty() && "Call to beginIndex() on empty range.");
+      return segments.front().start;
+    }
+
+    /// endIndex - Return the maximum point of the range of the whole,
+    /// exclusive.
+    SlotIndex endIndex() const {
+      assert(!empty() && "Call to endIndex() on empty range.");
+      return segments.back().end;
+    }
+
+    bool expiredAt(SlotIndex index) const {
+      return index >= endIndex();
+    }
+
+    bool liveAt(SlotIndex index) const {
+      const_iterator r = find(index);
+      return r != end() && r->start <= index;
+    }
+
+    /// Return the segment that contains the specified index, or null if there
+    /// is none.
+    const Segment *getSegmentContaining(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : &*I;
+    }
+
+    /// Return the live segment that contains the specified index, or null if
+    /// there is none.
+    Segment *getSegmentContaining(SlotIndex Idx) {
+      iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : &*I;
+    }
+
+    /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+    VNInfo *getVNInfoAt(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx);
+      return I == end() ? nullptr : I->valno;
+    }
+
+    /// getVNInfoBefore - Return the VNInfo that is live up to but not
+    /// necessarily including Idx, or NULL. Use this to find the reaching def
+    /// used by an instruction at this SlotIndex position.
+    VNInfo *getVNInfoBefore(SlotIndex Idx) const {
+      const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
+      return I == end() ? nullptr : I->valno;
+    }
+
+    /// Return an iterator to the segment that contains the specified index, or
+    /// end() if there is none.
+    iterator FindSegmentContaining(SlotIndex Idx) {
+      iterator I = find(Idx);
+      return I != end() && I->start <= Idx ? I : end();
+    }
+
+    const_iterator FindSegmentContaining(SlotIndex Idx) const {
+      const_iterator I = find(Idx);
+      return I != end() && I->start <= Idx ? I : end();
+    }
+
+    /// overlaps - Return true if the intersection of the two live ranges is
+    /// not empty.
+    bool overlaps(const LiveRange &other) const {
+      if (other.empty())
+        return false;
+      return overlapsFrom(other, other.begin());
+    }
+
+    /// overlaps - Return true if the two ranges have overlapping segments
+    /// that are not coalescable according to CP.
+    ///
+    /// Overlapping segments where one range is defined by a coalescable
+    /// copy are allowed.
+    bool overlaps(const LiveRange &Other, const CoalescerPair &CP,
+                  const SlotIndexes&) const;
+
+    /// overlaps - Return true if the live range overlaps an interval specified
+    /// by [Start, End).
+    bool overlaps(SlotIndex Start, SlotIndex End) const;
+
+    /// overlapsFrom - Return true if the intersection of the two live ranges
+    /// is not empty.  The specified iterator is a hint that we can begin
+    /// scanning the Other range starting at I.
+    bool overlapsFrom(const LiveRange &Other, const_iterator I) const;
+
+    /// Returns true if all segments of the @p Other live range are completely
+    /// covered by this live range.
+    /// Adjacent live ranges do not affect the covering: the live range
+    /// [1,5](5,10] covers (3,7].
+    bool covers(const LiveRange &Other) const;
+
+    /// Add the specified Segment to this range, merging segments as
+    /// appropriate.  This returns an iterator to the inserted segment (which
+    /// may have grown since it was inserted).
+    iterator addSegment(Segment S);
+
+    /// Attempt to extend a value defined after @p StartIdx to include @p Use.
+    /// Both @p StartIdx and @p Use should be in the same basic block. In case
+    /// of subranges, an extension could be prevented by an explicit "undef"
+    /// caused by a <def,read-undef> on a non-overlapping lane. The list of
+    /// location of such "undefs" should be provided in @p Undefs.
+    /// The return value is a pair: the first element is VNInfo of the value
+    /// that was extended (possibly nullptr), the second is a boolean value
+    /// indicating whether an "undef" was encountered.
+    /// If this range is live before @p Use in the basic block that starts at
+    /// @p StartIdx, and there is no intervening "undef", extend it to be live
+    /// up to @p Use, and return the pair {value, false}. If there is no
+    /// segment before @p Use and there is no "undef" between @p StartIdx and
+    /// @p Use, return {nullptr, false}. If there is an "undef" before @p Use,
+    /// return {nullptr, true}.
+    std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
+        SlotIndex StartIdx, SlotIndex Use);
+
+    /// Simplified version of the above "extendInBlock", which assumes that
+    /// no register lanes are undefined by <def,read-undef> operands.
+    /// If this range is live before @p Use in the basic block that starts
+    /// at @p StartIdx, extend it to be live up to @p Use, and return the
+    /// value. If there is no segment before @p Use, return nullptr.
+    VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Kill);
+
+    /// join - Join two live ranges (this, and other) together.  This applies
+    /// mappings to the value numbers in the LHS/RHS ranges as specified.  If
+    /// the ranges are not joinable, this aborts.
+    void join(LiveRange &Other,
+              const int *ValNoAssignments,
+              const int *RHSValNoAssignments,
+              SmallVectorImpl<VNInfo *> &NewVNInfo);
+
+    /// True iff this live range is a single segment that lies between the
+    /// specified boundaries, exclusively. Vregs live across a backedge are not
+    /// considered local. The boundaries are expected to lie within an extended
+    /// basic block, so vregs that are not live out should contain no holes.
+    bool isLocal(SlotIndex Start, SlotIndex End) const {
+      return beginIndex() > Start.getBaseIndex() &&
+        endIndex() < End.getBoundaryIndex();
+    }
+
+    /// Remove the specified segment from this range.  Note that the segment
+    /// must be a single Segment in its entirety.
+    void removeSegment(SlotIndex Start, SlotIndex End,
+                       bool RemoveDeadValNo = false);
+
+    void removeSegment(Segment S, bool RemoveDeadValNo = false) {
+      removeSegment(S.start, S.end, RemoveDeadValNo);
+    }
+
+    /// Remove segment pointed to by iterator @p I from this range.  This does
+    /// not remove dead value numbers.
+    iterator removeSegment(iterator I) {
+      return segments.erase(I);
+    }
+
+    /// Query Liveness at Idx.
+    /// The sub-instruction slot of Idx doesn't matter, only the instruction
+    /// it refers to is considered.
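+    ///
+    /// A typical use, sketched (LR names this live range, LIS a LiveIntervals
+    /// analysis, and MI a machine instruction overlapping LR):
+    /// \code
+    ///   LiveQueryResult LRQ = LR.Query(LIS->getInstructionIndex(MI));
+    ///   if (VNInfo *VNI = LRQ.valueIn())
+    ///     /* MI reads VNI; LRQ.isKill() says whether MI kills it */;
+    /// \endcode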
+    LiveQueryResult Query(SlotIndex Idx) const {
+      // Find the segment that enters the instruction.
+      const_iterator I = find(Idx.getBaseIndex());
+      const_iterator E = end();
+      if (I == E)
+        return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
+
+      // Is this an instruction live-in segment?
+      // If Idx is the start index of a basic block, include live-in segments
+      // that start at Idx.getBaseIndex().
+      VNInfo *EarlyVal = nullptr;
+      VNInfo *LateVal  = nullptr;
+      SlotIndex EndPoint;
+      bool Kill = false;
+      if (I->start <= Idx.getBaseIndex()) {
+        EarlyVal = I->valno;
+        EndPoint = I->end;
+        // Move to the potentially live-out segment.
+        if (SlotIndex::isSameInstr(Idx, I->end)) {
+          Kill = true;
+          if (++I == E)
+            return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+        }
+        // Special case: A PHIDef value can have its def in the middle of a
+        // segment if the value happens to be live out of the layout
+        // predecessor.
+        // Such a value is not live-in.
+        if (EarlyVal->def == Idx.getBaseIndex())
+          EarlyVal = nullptr;
+      }
+      // I now points to the segment that may be live-through, or defined by
+      // this instr. Ignore segments starting after the current instr.
+      if (!SlotIndex::isEarlierInstr(Idx, I->start)) {
+        LateVal = I->valno;
+        EndPoint = I->end;
+      }
+      return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+    }
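+
+    // Example use of Query() (a sketch; LR is a LiveRange and Idx a
+    // SlotIndex, both assumed to be in scope):
+    //
+    //   LiveQueryResult LRQ = LR.Query(Idx);
+    //   if (VNInfo *VNI = LRQ.valueIn()) {
+    //     // LR is live-in at Idx; VNI is the value reaching the instruction.
+    //   }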
+
+    /// removeValNo - Remove all the segments defined by the specified value#.
+    /// Also remove the value# from value# list.
+    void removeValNo(VNInfo *ValNo);
+
+    /// Returns true if the live range is zero length, i.e. no live segments
+    /// span instructions. It doesn't pay to spill such a range.
+    bool isZeroLength(SlotIndexes *Indexes) const {
+      for (const Segment &S : segments)
+        if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() <
+            S.end.getBaseIndex())
+          return false;
+      return true;
+    }
+
+    // Returns true if any segment in the live range contains any of the
+    // provided slot indexes.  Slots which occur in holes between
+    // segments will not cause the function to return true.
+    bool isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const;
+
+    bool operator<(const LiveRange& other) const {
+      const SlotIndex &thisIndex = beginIndex();
+      const SlotIndex &otherIndex = other.beginIndex();
+      return thisIndex < otherIndex;
+    }
+
+    /// Returns true if there is an explicit "undef" between @p Begin and
+    /// @p End.
+    bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin,
+                   SlotIndex End) const {
+      return std::any_of(Undefs.begin(), Undefs.end(),
+                [Begin,End] (SlotIndex Idx) -> bool {
+                  return Begin <= Idx && Idx < End;
+                });
+    }
+
+    /// Flush segment set into the regular segment vector.
+    /// The method is to be called after the live range
+    /// has been created, if use of the segment set was
+    /// activated in the constructor of the live range.
+    void flushSegmentSet();
+
+    void print(raw_ostream &OS) const;
+    void dump() const;
+
+    /// \brief Walk the range and assert if any invariants fail to hold.
+    ///
+    /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+    void verify() const {}
+#else
+    void verify() const;
+#endif
+
+  protected:
+    /// Append a segment to the list of segments.
+    void append(const LiveRange::Segment S);
+
+  private:
+    friend class LiveRangeUpdater;
+    void addSegmentToSet(Segment S);
+    void markValNoForDeletion(VNInfo *V);
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
+    LR.print(OS);
+    return OS;
+  }
+
+  /// LiveInterval - This class represents the liveness of a register
+  /// or stack slot.
+  class LiveInterval : public LiveRange {
+  public:
+    using super = LiveRange;
+
+    /// A live range for subregisters. The LaneMask specifies which parts of the
+    /// super register are covered by the interval.
+    /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
+    class SubRange : public LiveRange {
+    public:
+      SubRange *Next = nullptr;
+      LaneBitmask LaneMask;
+
+      /// Constructs a new SubRange object.
+      SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}
+
+      /// Constructs a new SubRange object by copying liveness from @p Other.
+      SubRange(LaneBitmask LaneMask, const LiveRange &Other,
+               BumpPtrAllocator &Allocator)
+        : LiveRange(Other, Allocator), LaneMask(LaneMask) {}
+
+      void print(raw_ostream &OS) const;
+      void dump() const;
+    };
+
+  private:
+    SubRange *SubRanges = nullptr; ///< Singly linked list of subregister live
+                                   /// ranges.
+
+  public:
+    const unsigned reg;  // the register or stack slot of this interval.
+    float weight;        // weight of this interval
+
+    LiveInterval(unsigned Reg, float Weight) : reg(Reg), weight(Weight) {}
+
+    ~LiveInterval() {
+      clearSubRanges();
+    }
+
+    template<typename T>
+    class SingleLinkedListIterator {
+      T *P;
+
+    public:
+      SingleLinkedListIterator<T>(T *P) : P(P) {}
+
+      SingleLinkedListIterator<T> &operator++() {
+        P = P->Next;
+        return *this;
+      }
+      SingleLinkedListIterator<T> operator++(int) {
+        SingleLinkedListIterator res = *this;
+        ++*this;
+        return res;
+      }
+      bool operator!=(const SingleLinkedListIterator<T> &Other) {
+        return P != Other.operator->();
+      }
+      bool operator==(const SingleLinkedListIterator<T> &Other) {
+        return P == Other.operator->();
+      }
+      T &operator*() const {
+        return *P;
+      }
+      T *operator->() const {
+        return P;
+      }
+    };
+
+    using subrange_iterator = SingleLinkedListIterator<SubRange>;
+    using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;
+
+    subrange_iterator subrange_begin() {
+      return subrange_iterator(SubRanges);
+    }
+    subrange_iterator subrange_end() {
+      return subrange_iterator(nullptr);
+    }
+
+    const_subrange_iterator subrange_begin() const {
+      return const_subrange_iterator(SubRanges);
+    }
+    const_subrange_iterator subrange_end() const {
+      return const_subrange_iterator(nullptr);
+    }
+
+    iterator_range<subrange_iterator> subranges() {
+      return make_range(subrange_begin(), subrange_end());
+    }
+
+    iterator_range<const_subrange_iterator> subranges() const {
+      return make_range(subrange_begin(), subrange_end());
+    }
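+
+    // Example iteration over the subregister ranges (a sketch; LI is a
+    // LiveInterval and Mask a LaneBitmask, both assumed to be in scope):
+    //
+    //   for (LiveInterval::SubRange &SR : LI.subranges())
+    //     if ((SR.LaneMask & Mask).any()) {
+    //       // SR covers at least one of the lanes in Mask.
+    //     }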
+
+    /// Creates a new empty subregister live range. The range is added at the
+    /// beginning of the subrange list; subrange iterators stay valid.
+    SubRange *createSubRange(BumpPtrAllocator &Allocator,
+                             LaneBitmask LaneMask) {
+      SubRange *Range = new (Allocator) SubRange(LaneMask);
+      appendSubRange(Range);
+      return Range;
+    }
+
+    /// Like createSubRange() but the new range is filled with a copy of the
+    /// liveness information in @p CopyFrom.
+    SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator,
+                                 LaneBitmask LaneMask,
+                                 const LiveRange &CopyFrom) {
+      SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator);
+      appendSubRange(Range);
+      return Range;
+    }
+
+    /// Returns true if subregister liveness information is available.
+    bool hasSubRanges() const {
+      return SubRanges != nullptr;
+    }
+
+    /// Removes all subregister liveness information.
+    void clearSubRanges();
+
+    /// Removes all subranges without any segments (subranges without segments
+    /// are not considered valid and should only exist temporarily).
+    void removeEmptySubRanges();
+
+    /// getSize - Returns the sum of the sizes of all the LiveRanges.
+    ///
+    unsigned getSize() const;
+
+    /// isSpillable - Can this interval be spilled?
+    bool isSpillable() const {
+      return weight != huge_valf;
+    }
+
+    /// markNotSpillable - Mark interval as not spillable
+    void markNotSpillable() {
+      weight = huge_valf;
+    }
+
+    /// For a given lane mask @p LaneMask, compute indexes at which the
+    /// lane is marked undefined by subregister <def,read-undef> definitions.
+    void computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
+                               LaneBitmask LaneMask,
+                               const MachineRegisterInfo &MRI,
+                               const SlotIndexes &Indexes) const;
+
+    /// Refines the subranges to support \p LaneMask. This may only be called
+    /// for LI.hasSubrange()==true. Subregister ranges are split or created
+    /// until \p LaneMask can be matched exactly. \p Mod is executed on the
+    /// matching subranges.
+    ///
+    /// Example:
+    ///    Given an interval with subranges with lanemasks L0F00, L00F0 and
+    ///    L000F, refining for mask L0018. Will split the L00F0 lane into
+    ///    L00E0 and L0010 and the L000F lane into L0007 and L0008. The Mod
+    ///    function will be applied to the L0010 and L0008 subranges.
+    void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask,
+                         std::function<void(LiveInterval::SubRange&)> Mod);
+
+    bool operator<(const LiveInterval& other) const {
+      const SlotIndex &thisIndex = beginIndex();
+      const SlotIndex &otherIndex = other.beginIndex();
+      return std::tie(thisIndex, reg) < std::tie(otherIndex, other.reg);
+    }
+
+    void print(raw_ostream &OS) const;
+    void dump() const;
+
+    /// \brief Walks the interval and assert if any invariants fail to hold.
+    ///
+    /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+    void verify(const MachineRegisterInfo *MRI = nullptr) const {}
+#else
+    void verify(const MachineRegisterInfo *MRI = nullptr) const;
+#endif
+
+  private:
+    /// Appends @p Range to SubRanges list.
+    void appendSubRange(SubRange *Range) {
+      Range->Next = SubRanges;
+      SubRanges = Range;
+    }
+
+    /// Free memory held by SubRange.
+    void freeSubRange(SubRange *S);
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS,
+                                 const LiveInterval::SubRange &SR) {
+    SR.print(OS);
+    return OS;
+  }
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
+    LI.print(OS);
+    return OS;
+  }
+
+  raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S);
+
+  inline bool operator<(SlotIndex V, const LiveRange::Segment &S) {
+    return V < S.start;
+  }
+
+  inline bool operator<(const LiveRange::Segment &S, SlotIndex V) {
+    return S.start < V;
+  }
+
+  /// Helper class for performant LiveRange bulk updates.
+  ///
+  /// Calling LiveRange::addSegment() repeatedly can be expensive on large
+  /// live ranges because segments after the insertion point may need to be
+  /// shifted. The LiveRangeUpdater class can defer the shifting when adding
+  /// many segments in order.
+  ///
+  /// The LiveRange will be in an invalid state until flush() is called.
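+  ///
+  /// A minimal usage sketch (Idx0..Idx2 and VNI are illustrative names
+  /// assumed to be set up elsewhere):
+  ///
+  ///   LiveRangeUpdater Updater(&LR);
+  ///   Updater.add(Idx0, Idx1, VNI);  // add segments in increasing order
+  ///   Updater.add(Idx1, Idx2, VNI);  // adjacent segments are coalesced
+  ///   Updater.flush();               // LR is valid again after this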
+  class LiveRangeUpdater {
+    LiveRange *LR;
+    SlotIndex LastStart;
+    LiveRange::iterator WriteI;
+    LiveRange::iterator ReadI;
+    SmallVector<LiveRange::Segment, 16> Spills;
+    void mergeSpills();
+
+  public:
+    /// Create a LiveRangeUpdater for adding segments to LR.
+    /// LR will temporarily be in an invalid state until flush() is called.
+    LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
+
+    ~LiveRangeUpdater() { flush(); }
+
+    /// Add a segment to LR and coalesce when possible, just like
+    /// LR.addSegment(). Segments should be added in increasing start order for
+    /// best performance.
+    void add(LiveRange::Segment);
+
+    void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
+      add(LiveRange::Segment(Start, End, VNI));
+    }
+
+    /// Return true if the LR is currently in an invalid state, and flush()
+    /// needs to be called.
+    bool isDirty() const { return LastStart.isValid(); }
+
+    /// Flush the updater state to LR so it is valid and contains all added
+    /// segments.
+    void flush();
+
+    /// Select a different destination live range.
+    void setDest(LiveRange *lr) {
+      if (LR != lr && isDirty())
+        flush();
+      LR = lr;
+    }
+
+    /// Get the current destination live range.
+    LiveRange *getDest() const { return LR; }
+
+    void dump() const;
+    void print(raw_ostream&) const;
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
+    X.print(OS);
+    return OS;
+  }
+
+  /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
+  /// LiveInterval into equivalence classes of connected components. A
+  /// LiveInterval that has multiple connected components can be broken into
+  /// multiple LiveIntervals.
+  ///
+  /// Given a LiveInterval that may have multiple connected components, run:
+  ///
+  ///   unsigned numComps = ConEQ.Classify(LI);
+  ///   if (numComps > 1) {
+  ///     // allocate numComps-1 new LiveIntervals into LIS[1..]
+  ///     ConEQ.Distribute(LIS);
+  ///   }
+
+  class ConnectedVNInfoEqClasses {
+    LiveIntervals &LIS;
+    IntEqClasses EqClass;
+
+  public:
+    explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}
+
+    /// Classify the values in \p LR into connected components.
+    /// Returns the number of connected components.
+    unsigned Classify(const LiveRange &LR);
+
+    /// getEqClass - Classify creates equivalence classes numbered 0..N. Return
+    /// the equivalence class assigned the VNI.
+    unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }
+
+    /// Distribute the values in \p LI into a separate LiveInterval
+    /// for each connected component. LIV must have an empty LiveInterval for
+    /// each additional connected component. The first connected component is
+    /// left in \p LI.
+    void Distribute(LiveInterval &LI, LiveInterval *LIV[],
+                    MachineRegisterInfo &MRI);
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVAL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
new file mode 100644
index 0000000..b922e54
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -0,0 +1,199 @@
+//===- LiveIntervalUnion.h - Live interval union data struct ---*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LiveIntervalUnion is a union of live segments across multiple live virtual
+// registers. This may be used during coalescing to represent a congruence
+// class, or during register allocation to model liveness of a physical
+// register.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
+#define LLVM_CODEGEN_LIVEINTERVALUNION_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+class raw_ostream;
+class TargetRegisterInfo;
+
+#ifndef NDEBUG
+// forward declaration
+template <unsigned Element> class SparseBitVector;
+
+using LiveVirtRegBitSet = SparseBitVector<128>;
+#endif
+
+/// Union of live intervals that are strong candidates for coalescing into a
+/// single register (either physical or virtual depending on the context).  We
+/// expect the constituent live intervals to be disjoint, although we may
+/// eventually make exceptions to handle value-based interference.
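+///
+/// A sketch of typical use during register allocation (Unions, PhysReg and
+/// VirtReg are illustrative names assumed to exist in the caller):
+///
+///   LiveIntervalUnion &Union = Unions[PhysReg];
+///   Union.unify(VirtReg, VirtReg);    // assign VirtReg to PhysReg
+///   ...
+///   Union.extract(VirtReg, VirtReg);  // evict it again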
+class LiveIntervalUnion {
+  // A set of live virtual register segments that supports fast insertion,
+  // intersection, and removal.
+  // It maps SlotIndex intervals to the containing LiveInterval pointers.
+  using LiveSegments = IntervalMap<SlotIndex, LiveInterval*>;
+
+public:
+  // SegmentIter can advance to the next segment ordered by starting position
+  // which may belong to a different live virtual register. We also must be able
+  // to reach the current segment's containing virtual register.
+  using SegmentIter = LiveSegments::iterator;
+
+  /// Const version of SegmentIter.
+  using ConstSegmentIter = LiveSegments::const_iterator;
+
+  // LiveIntervalUnions share an external allocator.
+  using Allocator = LiveSegments::Allocator;
+
+private:
+  unsigned Tag = 0;       // unique tag for current contents.
+  LiveSegments Segments;  // union of virtual reg segments
+
+public:
+  explicit LiveIntervalUnion(Allocator &a) : Segments(a) {}
+
+  // Iterate over all segments in the union of live virtual registers ordered
+  // by their starting position.
+  SegmentIter begin() { return Segments.begin(); }
+  SegmentIter end() { return Segments.end(); }
+  SegmentIter find(SlotIndex x) { return Segments.find(x); }
+  ConstSegmentIter begin() const { return Segments.begin(); }
+  ConstSegmentIter end() const { return Segments.end(); }
+  ConstSegmentIter find(SlotIndex x) const { return Segments.find(x); }
+
+  bool empty() const { return Segments.empty(); }
+  SlotIndex startIndex() const { return Segments.start(); }
+
+  // Provide public access to the underlying map to allow overlap iteration.
+  using Map = LiveSegments;
+  const Map &getMap() const { return Segments; }
+
+  /// getTag - Return an opaque tag representing the current state of the union.
+  unsigned getTag() const { return Tag; }
+
+  /// changedSince - Return true if the union has changed since getTag
+  /// returned tag.
+  bool changedSince(unsigned tag) const { return tag != Tag; }
+
+  // Add a live virtual register to this union and merge its segments.
+  void unify(LiveInterval &VirtReg, const LiveRange &Range);
+
+  // Remove a live virtual register's segments from this union.
+  void extract(LiveInterval &VirtReg, const LiveRange &Range);
+
+  // Remove all inserted virtual registers.
+  void clear() { Segments.clear(); ++Tag; }
+
+  // Print union, using TRI to translate register names
+  void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
+
+#ifndef NDEBUG
+  // Verify the live intervals in this union and add them to the visited set.
+  void verify(LiveVirtRegBitSet& VisitedVRegs);
+#endif
+
+  /// Query interferences between a single live virtual register and a live
+  /// interval union.
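+  ///
+  /// A sketch of how an allocator might use it (Q, UserTag, VirtReg and
+  /// Union are assumed to be in scope):
+  ///
+  ///   Q.init(UserTag, VirtReg, Union);
+  ///   if (Q.checkInterference())
+  ///     Q.collectInterferingVRegs();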
+  class Query {
+    const LiveIntervalUnion *LiveUnion = nullptr;
+    const LiveRange *LR = nullptr;
+    LiveRange::const_iterator LRI;  ///< current position in LR
+    ConstSegmentIter LiveUnionI;    ///< current position in LiveUnion
+    SmallVector<LiveInterval*,4> InterferingVRegs;
+    bool CheckedFirstInterference = false;
+    bool SeenAllInterferences = false;
+    unsigned Tag = 0;
+    unsigned UserTag = 0;
+
+    void reset(unsigned NewUserTag, const LiveRange &NewLR,
+               const LiveIntervalUnion &NewLiveUnion) {
+      LiveUnion = &NewLiveUnion;
+      LR = &NewLR;
+      InterferingVRegs.clear();
+      CheckedFirstInterference = false;
+      SeenAllInterferences = false;
+      Tag = NewLiveUnion.getTag();
+      UserTag = NewUserTag;
+    }
+
+  public:
+    Query() = default;
+    Query(const LiveRange &LR, const LiveIntervalUnion &LIU):
+      LiveUnion(&LIU), LR(&LR) {}
+    Query(const Query &) = delete;
+    Query &operator=(const Query &) = delete;
+
+    void init(unsigned NewUserTag, const LiveRange &NewLR,
+              const LiveIntervalUnion &NewLiveUnion) {
+      if (UserTag == NewUserTag && LR == &NewLR && LiveUnion == &NewLiveUnion &&
+          !NewLiveUnion.changedSince(Tag)) {
+        // Retain cached results, e.g. firstInterference.
+        return;
+      }
+      reset(NewUserTag, NewLR, NewLiveUnion);
+    }
+
+    // Does this live virtual register interfere with the union?
+    bool checkInterference() { return collectInterferingVRegs(1); }
+
+    // Count the virtual registers in this union that interfere with this
+    // query's live virtual register, up to maxInterferingRegs.
+    unsigned collectInterferingVRegs(
+        unsigned MaxInterferingRegs = std::numeric_limits<unsigned>::max());
+
+    // Was this virtual register visited during collectInterferingVRegs?
+    bool isSeenInterference(LiveInterval *VReg) const;
+
+    // Did collectInterferingVRegs collect all interferences?
+    bool seenAllInterferences() const { return SeenAllInterferences; }
+
+    // Vector generated by collectInterferingVRegs.
+    const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
+      return InterferingVRegs;
+    }
+  };
+
+  // Array of LiveIntervalUnions.
+  class Array {
+    unsigned Size = 0;
+    LiveIntervalUnion *LIUs = nullptr;
+
+  public:
+    Array() = default;
+    ~Array() { clear(); }
+
+    // Initialize the array to have Size entries.
+    // Reuse an existing allocation if the size matches.
+    void init(LiveIntervalUnion::Allocator&, unsigned Size);
+
+    unsigned size() const { return Size; }
+
+    void clear();
+
+    LiveIntervalUnion& operator[](unsigned idx) {
+      assert(idx < Size && "idx out of bounds");
+      return LIUs[idx];
+    }
+
+    const LiveIntervalUnion& operator[](unsigned Idx) const {
+      assert(Idx < Size && "Idx out of bounds");
+      return LIUs[Idx];
+    }
+  };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEINTERVALUNION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
new file mode 100644
index 0000000..1150f3c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveIntervals.h
@@ -0,0 +1,481 @@
+//===- LiveIntervals.h - Live Interval Analysis -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements the LiveInterval analysis pass.  Given some
+/// numbering of each of the machine instructions (in this implementation,
+/// depth-first order) an interval [i, j) is said to be a live interval for
+/// register v if
+/// there is no instruction with number j' > j such that v is live at j' and
+/// there is no instruction with number i' < i such that v is live at i'. In
+/// this implementation intervals can have holes, i.e. an interval might look
+/// like [1,20), [50,65), [1000,1001).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALS_H
+#define LLVM_CODEGEN_LIVEINTERVALS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> UseSegmentSetForPhysRegs;
+
+class BitVector;
+class LiveRangeCalc;
+class MachineBlockFrequencyInfo;
+class MachineDominatorTree;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class TargetInstrInfo;
+class VirtRegMap;
+
+  class LiveIntervals : public MachineFunctionPass {
+    MachineFunction* MF;
+    MachineRegisterInfo* MRI;
+    const TargetRegisterInfo* TRI;
+    const TargetInstrInfo* TII;
+    AliasAnalysis *AA;
+    SlotIndexes* Indexes;
+    MachineDominatorTree *DomTree = nullptr;
+    LiveRangeCalc *LRCalc = nullptr;
+
+    /// Special pool allocator for VNInfo's (LiveInterval val#).
+    VNInfo::Allocator VNInfoAllocator;
+
+    /// Live interval pointers for all the virtual registers.
+    IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
+
+    /// Sorted list of instructions with register mask operands. Always use the
+    /// 'r' slot, RegMasks are normal clobbers, not early clobbers.
+    SmallVector<SlotIndex, 8> RegMaskSlots;
+
+    /// This vector is parallel to RegMaskSlots, it holds a pointer to the
+    /// corresponding register mask.  This pointer can be recomputed as:
+    ///
+    ///   MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
+    ///   unsigned OpNum = findRegMaskOperand(MI);
+    ///   RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
+    ///
+    /// This is kept in a separate vector partly because some standard
+    /// libraries don't support lower_bound() with mixed objects, partly to
+    /// improve locality when searching in RegMaskSlots.
+    /// Also see the comment in LiveInterval::find().
+    SmallVector<const uint32_t*, 8> RegMaskBits;
+
+    /// For each basic block number, keep (begin, size) pairs indexing into the
+    /// RegMaskSlots and RegMaskBits arrays.
+    /// Note that basic block numbers may not be layout contiguous; that's why
+    /// we can't just keep track of the first register mask in each basic
+    /// block.
+    SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
+
+    /// Keeps a live range set for each register unit to track fixed physreg
+    /// interference.
+    SmallVector<LiveRange*, 0> RegUnitRanges;
+
+  public:
+    static char ID;
+
+    LiveIntervals();
+    ~LiveIntervals() override;
+
+    /// Calculate the spill weight to assign to a single instruction.
+    static float getSpillWeight(bool isDef, bool isUse,
+                                const MachineBlockFrequencyInfo *MBFI,
+                                const MachineInstr &Instr);
+
+    /// Calculate the spill weight to assign to a single instruction.
+    static float getSpillWeight(bool isDef, bool isUse,
+                                const MachineBlockFrequencyInfo *MBFI,
+                                const MachineBasicBlock *MBB);
+
+    LiveInterval &getInterval(unsigned Reg) {
+      if (hasInterval(Reg))
+        return *VirtRegIntervals[Reg];
+      else
+        return createAndComputeVirtRegInterval(Reg);
+    }
+
+    const LiveInterval &getInterval(unsigned Reg) const {
+      return const_cast<LiveIntervals*>(this)->getInterval(Reg);
+    }
+
+    bool hasInterval(unsigned Reg) const {
+      return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
+    }
+
+    /// Interval creation.
+    LiveInterval &createEmptyInterval(unsigned Reg) {
+      assert(!hasInterval(Reg) && "Interval already exists!");
+      VirtRegIntervals.grow(Reg);
+      VirtRegIntervals[Reg] = createInterval(Reg);
+      return *VirtRegIntervals[Reg];
+    }
+
+    LiveInterval &createAndComputeVirtRegInterval(unsigned Reg) {
+      LiveInterval &LI = createEmptyInterval(Reg);
+      computeVirtRegInterval(LI);
+      return LI;
+    }
+
+    /// Interval removal.
+    void removeInterval(unsigned Reg) {
+      delete VirtRegIntervals[Reg];
+      VirtRegIntervals[Reg] = nullptr;
+    }
+
+    /// Given a register and an instruction, adds a live segment from that
+    /// instruction to the end of its MBB.
+    LiveInterval::Segment addSegmentToEndOfBlock(unsigned reg,
+                                                 MachineInstr &startInst);
+
+    /// After removing some uses of a register, shrink its live range to just
+    /// the remaining uses. This method does not compute reaching defs for new
+    /// uses, and it doesn't remove dead defs.
+    /// Dead PHIDef values are marked as unused. New dead machine instructions
+    /// are added to the dead vector. Returns true if the interval may have been
+    /// separated into multiple connected components.
+    bool shrinkToUses(LiveInterval *li,
+                      SmallVectorImpl<MachineInstr*> *dead = nullptr);
+
+    /// Specialized version of
+    /// shrinkToUses(LiveInterval *li, SmallVectorImpl<MachineInstr*> *dead)
+    /// that works on a subregister live range and only looks at uses matching
+    /// the lane mask of the subregister range.
+    /// This may leave the subrange empty which needs to be cleaned up with
+    /// LiveInterval::removeEmptySubranges() afterwards.
+    void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
+
+    /// Extend the live range \p LR to reach all points in \p Indices. The
+    /// points in the \p Indices array must be jointly dominated by the union
+    /// of the existing defs in \p LR and points in \p Undefs.
+    ///
+    /// PHI-defs are added as needed to maintain SSA form.
+    ///
+    /// If a SlotIndex in \p Indices is the end index of a basic block, \p LR
+    /// will be extended to be live out of the basic block.
+    /// If a SlotIndex in \p Indices is jointly dominated only by points in
+    /// \p Undefs, the live range will not be extended to that point.
+    ///
+    /// See also LiveRangeCalc::extend().
+    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices,
+                         ArrayRef<SlotIndex> Undefs);
+
+    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices) {
+      extendToIndices(LR, Indices, /*Undefs=*/{});
+    }
+
+    /// If \p LR has a live value at \p Kill, prune its live range by removing
+    /// any liveness reachable from Kill. Add live range end points to
+    /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
+    /// value's live range.
+    ///
+    /// Calling pruneValue() and extendToIndices() can be used to reconstruct
+    /// SSA form after adding defs to a virtual register.
+    void pruneValue(LiveRange &LR, SlotIndex Kill,
+                    SmallVectorImpl<SlotIndex> *EndPoints);
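+
+    // For example, SSA repair typically looks like this (a sketch; LR and
+    // Kill are assumed to be in scope, and LIS is this LiveIntervals
+    // object):
+    //
+    //   SmallVector<SlotIndex, 8> EndPoints;
+    //   LIS.pruneValue(LR, Kill, &EndPoints);
+    //   ... add new defs to LR ...
+    //   LIS.extendToIndices(LR, EndPoints);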
+
+    /// This function should not be used. Its intent is to tell you that you
+    /// are doing something wrong if you call pruneValue directly on a
+    /// LiveInterval. Indeed, you are supposed to call pruneValue on the main
+    /// LiveRange and all the LiveRange of the subranges if any.
+    LLVM_ATTRIBUTE_UNUSED void pruneValue(LiveInterval &, SlotIndex,
+                                          SmallVectorImpl<SlotIndex> *) {
+      llvm_unreachable(
+          "Use pruneValue on the main LiveRange and on each subrange");
+    }
+
+    SlotIndexes *getSlotIndexes() const {
+      return Indexes;
+    }
+
+    AliasAnalysis *getAliasAnalysis() const {
+      return AA;
+    }
+
+    /// Returns true if the specified machine instr has been removed or was
+    /// never entered in the map.
+    bool isNotInMIMap(const MachineInstr &Instr) const {
+      return !Indexes->hasIndex(Instr);
+    }
+
+    /// Returns the base index of the given instruction.
+    SlotIndex getInstructionIndex(const MachineInstr &Instr) const {
+      return Indexes->getInstructionIndex(Instr);
+    }
+
+    /// Returns the instruction associated with the given index.
+    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+      return Indexes->getInstructionFromIndex(index);
+    }
+
+    /// Return the first index in the given basic block.
+    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+      return Indexes->getMBBStartIdx(mbb);
+    }
+
+    /// Return the last index in the given basic block.
+    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+      return Indexes->getMBBEndIdx(mbb);
+    }
+
+    bool isLiveInToMBB(const LiveRange &LR,
+                       const MachineBasicBlock *mbb) const {
+      return LR.liveAt(getMBBStartIdx(mbb));
+    }
+
+    bool isLiveOutOfMBB(const LiveRange &LR,
+                        const MachineBasicBlock *mbb) const {
+      return LR.liveAt(getMBBEndIdx(mbb).getPrevSlot());
+    }
+
+    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+      return Indexes->getMBBFromIndex(index);
+    }
+
+    void insertMBBInMaps(MachineBasicBlock *MBB) {
+      Indexes->insertMBBInMaps(MBB);
+      assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
+             "Blocks must be added in order.");
+      RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
+    }
+
+    SlotIndex InsertMachineInstrInMaps(MachineInstr &MI) {
+      return Indexes->insertMachineInstrInMaps(MI);
+    }
+
+    void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
+                                       MachineBasicBlock::iterator E) {
+      for (MachineBasicBlock::iterator I = B; I != E; ++I)
+        Indexes->insertMachineInstrInMaps(*I);
+    }
+
+    void RemoveMachineInstrFromMaps(MachineInstr &MI) {
+      Indexes->removeMachineInstrFromMaps(MI);
+    }
+
+    SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+      return Indexes->replaceMachineInstrInMaps(MI, NewMI);
+    }
+
+    VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override;
+    void releaseMemory() override;
+
+    /// Pass entry point; calculates LiveIntervals.
+    bool runOnMachineFunction(MachineFunction&) override;
+
+    /// Implement the dump method.
+    void print(raw_ostream &O, const Module* = nullptr) const override;
+
+    /// If LI is confined to a single basic block, return a pointer to that
+    /// block.  If LI is live in to or out of any block, return NULL.
+    MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
+
+    /// Returns true if VNI is killed by any PHI-def values in LI.
+    /// This may conservatively return true to avoid expensive computations.
+    bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
+
+    /// Add kill flags to any instruction that kills a virtual register.
+    void addKillFlags(const VirtRegMap*);
+
+    /// Call this method to notify LiveIntervals that instruction \p MI has been
+    /// moved within a basic block. This will update the live intervals for all
+    /// operands of \p MI. Moves between basic blocks are not supported.
+    ///
+    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+    void handleMove(MachineInstr &MI, bool UpdateFlags = false);
+
+    /// Update intervals for operands of \p MI so that they begin/end on the
+    /// SlotIndex for \p BundleStart.
+    ///
+    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+    ///
+    /// Requires MI and BundleStart to have SlotIndexes, and assumes
+    /// existing liveness is accurate. BundleStart should be the first
+    /// instruction in the Bundle.
+    void handleMoveIntoBundle(MachineInstr &MI, MachineInstr &BundleStart,
+                              bool UpdateFlags = false);
+
+    /// Update live intervals for instructions in a range of iterators. It is
+    /// intended for use after target hooks that may insert or remove
+    /// instructions, and is only efficient for a small number of instructions.
+    ///
+    /// OrigRegs is a vector of registers that were originally used by the
+    /// instructions in the range between the two iterators.
+    ///
+    /// Currently, the only changes that are supported are simple removal
+    /// and addition of uses.
+    void repairIntervalsInRange(MachineBasicBlock *MBB,
+                                MachineBasicBlock::iterator Begin,
+                                MachineBasicBlock::iterator End,
+                                ArrayRef<unsigned> OrigRegs);
+
+    // Register mask functions.
+    //
+    // Machine instructions may use a register mask operand to indicate that a
+    // large number of registers are clobbered by the instruction.  This is
+    // typically used for calls.
+    //
+    // For compile time performance reasons, these clobbers are not recorded in
+    // the live intervals for individual physical registers.  Instead,
+    // LiveIntervalAnalysis maintains a sorted list of instructions with
+    // register mask operands.
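+    //
+    // A sketch of typical use (LI is a LiveInterval assumed to be in scope):
+    //
+    //   BitVector UsableRegs;
+    //   if (checkRegMaskInterference(LI, UsableRegs)) {
+    //     // LI crosses at least one regmask; UsableRegs[R] is true iff
+    //     // physreg R is not clobbered by any of the crossed masks.
+    //   }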
+
+    /// Returns a sorted array of slot indices of all instructions with
+    /// register mask operands.
+    ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
+
+    /// Returns a sorted array of slot indices of all instructions with register
+    /// mask operands in the basic block numbered \p MBBNum.
+    ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
+      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+      return getRegMaskSlots().slice(P.first, P.second);
+    }
+
+    /// Returns an array of register mask pointers corresponding to
+    /// getRegMaskSlots().
+    ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
+
+    /// Returns an array of mask pointers corresponding to
+    /// getRegMaskSlotsInBlock(MBBNum).
+    ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
+      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+      return getRegMaskBits().slice(P.first, P.second);
+    }
+
+    /// Test if \p LI is live across any register mask instructions, and
+    /// compute a bit mask of physical registers that are not clobbered by any
+    /// of them.
+    ///
+    /// Returns false if \p LI doesn't cross any register mask instructions. In
+    /// that case, the bit vector is not filled in.
+    bool checkRegMaskInterference(LiveInterval &LI,
+                                  BitVector &UsableRegs);
+
+    // Register unit functions.
+    //
+    // Fixed interference occurs when MachineInstrs use physregs directly
+    // instead of virtual registers. This typically happens when passing
+    // arguments to a function call, or when instructions require operands in
+    // fixed registers.
+    //
+    // Each physreg has one or more register units, see MCRegisterInfo. We
+    // track liveness per register unit to handle aliasing registers more
+    // efficiently.
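+    //
+    // A sketch of checking fixed interference for a physreg (Reg and TRI,
+    // a const TargetRegisterInfo *, are assumed to be in scope):
+    //
+    //   for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
+    //     LiveRange &UnitRange = getRegUnit(*Units);
+    //     // Overlaps with UnitRange indicate fixed interference.
+    //   }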
+
+    /// Return the live range for register unit \p Unit. It will be computed if
+    /// it doesn't exist.
+    LiveRange &getRegUnit(unsigned Unit) {
+      LiveRange *LR = RegUnitRanges[Unit];
+      if (!LR) {
+        // Compute missing ranges on demand.
+        // Use segment set to speed-up initial computation of the live range.
+        RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs);
+        computeRegUnitRange(*LR, Unit);
+      }
+      return *LR;
+    }
+
+    /// Return the live range for register unit \p Unit if it has already been
+    /// computed, or nullptr if it hasn't been computed yet.
+    LiveRange *getCachedRegUnit(unsigned Unit) {
+      return RegUnitRanges[Unit];
+    }
+
+    const LiveRange *getCachedRegUnit(unsigned Unit) const {
+      return RegUnitRanges[Unit];
+    }
+
+    /// Remove computed live range for register unit \p Unit. Subsequent uses
+    /// should rely on on-demand recomputation.
+    void removeRegUnit(unsigned Unit) {
+      delete RegUnitRanges[Unit];
+      RegUnitRanges[Unit] = nullptr;
+    }
+
+    /// Remove value numbers and related live segments starting at position
+    /// \p Pos that are part of any liverange of physical register \p Reg or one
+    /// of its subregisters.
+    void removePhysRegDefAt(unsigned Reg, SlotIndex Pos);
+
+    /// Remove value number and related live segments of \p LI and its subranges
+    /// that start at position \p Pos.
+    void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);
+
+    /// Split separate components in LiveInterval \p LI into separate intervals.
+    void splitSeparateComponents(LiveInterval &LI,
+                                 SmallVectorImpl<LiveInterval*> &SplitLIs);
+
+    /// For live interval \p LI with correct SubRanges construct matching
+    /// information for the main live range. Expects the main live range to not
+    /// have any segments or value numbers.
+    void constructMainRangeFromSubranges(LiveInterval &LI);
+
+  private:
+    /// Compute live intervals for all virtual registers.
+    void computeVirtRegs();
+
+    /// Compute RegMaskSlots and RegMaskBits.
+    void computeRegMasks();
+
+    /// Walk the values in \p LI and check for dead values:
+    /// - Dead PHIDef values are marked as unused.
+    /// - Dead operands are marked as such.
+    /// - Completely dead machine instructions are added to the \p dead vector
+    ///   if it is not nullptr.
+    /// Returns true if any PHI value numbers have been removed which may
+    /// have separated the interval into multiple connected components.
+    bool computeDeadValues(LiveInterval &LI,
+                           SmallVectorImpl<MachineInstr*> *dead);
+
+    static LiveInterval* createInterval(unsigned Reg);
+
+    void printInstrs(raw_ostream &O) const;
+    void dumpInstrs() const;
+
+    void computeLiveInRegUnits();
+    void computeRegUnitRange(LiveRange&, unsigned Unit);
+    void computeVirtRegInterval(LiveInterval&);
+
+    /// Helper function for repairIntervalsInRange(), walks backwards and
+    /// creates/modifies live segments in \p LR to match the operands found.
+    /// Only full operands or operands with subregisters matching \p LaneMask
+    /// are considered.
+    void repairOldRegInRange(MachineBasicBlock::iterator Begin,
+                             MachineBasicBlock::iterator End,
+                             const SlotIndex endIdx, LiveRange &LR,
+                             unsigned Reg,
+                             LaneBitmask LaneMask = LaneBitmask::getAll());
+
+    class HMEditor;
+  };
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
new file mode 100644
index 0000000..f9aab0d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LivePhysRegs.h
@@ -0,0 +1,190 @@
+//===- llvm/CodeGen/LivePhysRegs.h - Live Physical Register Set -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file implements the LivePhysRegs utility for tracking liveness of
+/// physical registers. This can be used for ad-hoc liveness tracking after
+/// register allocation. You can start with the live-ins/live-outs at the
+/// beginning/end of a block and update the information while walking the
+/// instructions inside the block. This implementation tracks the liveness on a
+/// sub-register granularity.
+///
+/// We assume that the high bits of a physical super-register are not preserved
+/// unless the instruction has an implicit-use operand reading the super-
+/// register.
+///
+/// X86 Example:
+/// %ymm0 = ...
+/// %xmm0 = ... (Kills %xmm0, all of %xmm0's sub-registers, and %ymm0)
+///
+/// %ymm0 = ...
+/// %xmm0 = ..., implicit %ymm0 (%ymm0 and all its sub-registers are alive)
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
+#define LLVM_CODEGEN_LIVEPHYSREGS_H
+
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cassert>
+#include <utility>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineOperand;
+class MachineRegisterInfo;
+class raw_ostream;
+
+/// \brief A set of physical registers with utility functions to track liveness
+/// when walking backward/forward through a basic block.
+class LivePhysRegs {
+  const TargetRegisterInfo *TRI = nullptr;
+  SparseSet<unsigned> LiveRegs;
+
+public:
+  /// Constructs an uninitialized set. init() needs to be called to initialize it.
+  LivePhysRegs() = default;
+
+  /// Constructs and initializes an empty set.
+  LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
+    LiveRegs.setUniverse(TRI.getNumRegs());
+  }
+
+  LivePhysRegs(const LivePhysRegs&) = delete;
+  LivePhysRegs &operator=(const LivePhysRegs&) = delete;
+
+  /// (re-)initializes and clears the set.
+  void init(const TargetRegisterInfo &TRI) {
+    this->TRI = &TRI;
+    LiveRegs.clear();
+    LiveRegs.setUniverse(TRI.getNumRegs());
+  }
+
+  /// Clears the set.
+  void clear() { LiveRegs.clear(); }
+
+  /// Returns true if the set is empty.
+  bool empty() const { return LiveRegs.empty(); }
+
+  /// Adds a physical register and all its sub-registers to the set.
+  void addReg(unsigned Reg) {
+    assert(TRI && "LivePhysRegs is not initialized.");
+    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+    for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+         SubRegs.isValid(); ++SubRegs)
+      LiveRegs.insert(*SubRegs);
+  }
+
+  /// \brief Removes a physical register, all its sub-registers, and all its
+  /// super-registers from the set.
+  void removeReg(unsigned Reg) {
+    assert(TRI && "LivePhysRegs is not initialized.");
+    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+    for (MCRegAliasIterator R(Reg, TRI, true); R.isValid(); ++R)
+      LiveRegs.erase(*R);
+  }
+
+  /// Removes physical registers clobbered by the regmask operand \p MO.
+  void removeRegsInMask(const MachineOperand &MO,
+        SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
+        nullptr);
+
+  /// \brief Returns true if register \p Reg is contained in the set. This also
+  /// works if only the super register of \p Reg has been defined, because
+  /// addReg() always adds all sub-registers to the set as well.
+  /// Note: Returns false if only some sub-registers are live; use available()
+  /// when searching for a free register.
+  bool contains(unsigned Reg) const { return LiveRegs.count(Reg); }
+
+  /// Returns true if register \p Reg and no aliasing register is in the set.
+  bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
+
+  /// Remove defined registers and regmask kills from the set.
+  void removeDefs(const MachineInstr &MI);
+
+  /// Add uses to the set.
+  void addUses(const MachineInstr &MI);
+
+  /// Simulates liveness when stepping backwards over an instruction (bundle).
+  /// Remove defs, add uses. This is the recommended way of calculating
+  /// liveness.
+  void stepBackward(const MachineInstr &MI);
+
+  /// Simulates liveness when stepping forward over an instruction (bundle).
+  /// Remove killed-uses, add defs. This is not the recommended way, because it
+  /// depends on accurate kill flags. If possible use stepBackward() instead of
+  /// this function. The clobbers set will be the list of registers either
+  /// defined or clobbered by a regmask.  The operand will identify whether this
+  /// is a regmask or register operand.
+  void stepForward(const MachineInstr &MI,
+        SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
+
+  /// Adds all live-in registers of basic block \p MBB.
+  /// Live in registers are the registers in the blocks live-in list and the
+  /// pristine registers.
+  void addLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds all live-out registers of basic block \p MBB.
+  /// Live out registers are the union of the live-in registers of the successor
+  /// blocks and pristine registers. Live out registers of the end block are the
+  /// callee saved registers.
+  void addLiveOuts(const MachineBasicBlock &MBB);
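+
+  // A common backward-walk pattern (a sketch; MBB and TRI are assumed to be
+  // in scope):
+  //
+  //   LivePhysRegs LiveRegs(TRI);
+  //   LiveRegs.addLiveOuts(MBB);
+  //   for (MachineInstr &MI : llvm::reverse(MBB))
+  //     LiveRegs.stepBackward(MI);
+  //   // LiveRegs now holds the live-ins of MBB.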
+
+  /// Adds all live-out registers of basic block \p MBB but skips pristine
+  /// registers.
+  void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
+
+  using const_iterator = SparseSet<unsigned>::const_iterator;
+
+  const_iterator begin() const { return LiveRegs.begin(); }
+  const_iterator end() const { return LiveRegs.end(); }
+
+  /// Prints the currently live registers to \p OS.
+  void print(raw_ostream &OS) const;
+
+  /// Dumps the currently live registers to the debug output.
+  void dump() const;
+
+private:
+  /// \brief Adds live-in registers from basic block \p MBB, taking associated
+  /// lane masks into consideration.
+  void addBlockLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds pristine registers. Pristine registers are callee saved registers
+  /// that are unused in the function.
+  void addPristines(const MachineFunction &MF);
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
+  LR.print(OS);
+  return OS;
+}
+
+/// \brief Computes registers live-in to \p MBB assuming all of its successors
+/// live-in lists are up-to-date. Puts the result into the given LivePhysReg
+/// instance \p LiveRegs.
+void computeLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB);
+
+/// Recomputes dead and kill flags in \p MBB.
+void recomputeLivenessFlags(MachineBasicBlock &MBB);
+
+/// Adds registers contained in \p LiveRegs to the block live-in list of \p MBB.
+/// Does not add reserved registers.
+void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs);
+
+/// Convenience function combining computeLiveIns() and addLiveIns().
+void computeAndAddLiveIns(LivePhysRegs &LiveRegs,
+                          MachineBasicBlock &MBB);
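+
+// For example, after creating a new block NewMBB whose successors already
+// have up-to-date live-in lists, a pass might write (a sketch):
+//
+//   LivePhysRegs LiveRegs;
+//   computeAndAddLiveIns(LiveRegs, *NewMBB);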
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEPHYSREGS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
new file mode 100644
index 0000000..82b1f0b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRangeEdit.h
@@ -0,0 +1,258 @@
+//===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//
+// The parent register is never changed. Instead, a number of new virtual
+// registers are created and added to the newRegs vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
+#define LLVM_CODEGEN_LIVERANGEEDIT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineInstr;
+class MachineLoopInfo;
+class MachineOperand;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRangeEdit : private MachineRegisterInfo::Delegate {
+public:
+  /// Callback methods for LiveRangeEdit owners.
+  class Delegate {
+    virtual void anchor();
+
+  public:
+    virtual ~Delegate() = default;
+
+    /// Called immediately before erasing a dead machine instruction.
+    virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
+
+    /// Called when a virtual register is no longer used. Return false to defer
+    /// its deletion from LiveIntervals.
+    virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
+
+    /// Called before shrinking the live range of a virtual register.
+    virtual void LRE_WillShrinkVirtReg(unsigned) {}
+
+    /// Called after cloning a virtual register.
+    /// This is used for new registers representing connected components of Old.
+    virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
+  };
+
+private:
+  LiveInterval *Parent;
+  SmallVectorImpl<unsigned> &NewRegs;
+  MachineRegisterInfo &MRI;
+  LiveIntervals &LIS;
+  VirtRegMap *VRM;
+  const TargetInstrInfo &TII;
+  Delegate *const TheDelegate;
+
+  /// FirstNew - Index of the first register added to NewRegs.
+  const unsigned FirstNew;
+
+  /// ScannedRemattable - true when remattable values have been identified.
+  bool ScannedRemattable = false;
+
+  /// DeadRemats - The saved instructions which have become dead after
+  /// rematerialization but have not been deleted yet -- to be done in
+  /// postOptimization.
+  SmallPtrSet<MachineInstr *, 32> *DeadRemats;
+
+  /// Remattable - Values defined by remattable instructions as identified by
+  /// tii.isTriviallyReMaterializable().
+  SmallPtrSet<const VNInfo *, 4> Remattable;
+
+  /// Rematted - Values that were actually rematted, and so need to have their
+  /// live range trimmed or entirely removed.
+  SmallPtrSet<const VNInfo *, 4> Rematted;
+
+  /// scanRemattable - Identify the Parent values that may rematerialize.
+  void scanRemattable(AliasAnalysis *aa);
+
+  /// allUsesAvailableAt - Return true if all registers used by OrigMI at
+  /// OrigIdx are also available with the same value at UseIdx.
+  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+                          SlotIndex UseIdx) const;
+
+  /// foldAsLoad - If LI has a single use and a single def that can be folded as
+  /// a load, eliminate the register by folding the def into the use.
+  bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);
+
+  using ToShrinkSet = SetVector<LiveInterval *, SmallVector<LiveInterval *, 8>,
+                                SmallPtrSet<LiveInterval *, 8>>;
+
+  /// Helper for eliminateDeadDefs.
+  void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
+                        AliasAnalysis *AA);
+
+  /// MachineRegisterInfo callback to notify when new virtual
+  /// registers are created.
+  void MRI_NoteNewVirtualRegister(unsigned VReg) override;
+
+  /// \brief Check if MachineOperand \p MO is a last use/kill either in the
+  /// main live range of \p LI or in one of the matching subregister ranges.
+  bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
+
+  /// Create a new empty interval based on OldReg.
+  LiveInterval &createEmptyIntervalFrom(unsigned OldReg, bool createSubRanges);
+
+public:
+  /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
+  /// @param parent The register being spilled or split.
+  /// @param newRegs List to receive any new registers created. This needn't be
+  ///                empty initially, any existing registers are ignored.
+  /// @param MF The MachineFunction the live range edit is taking place in.
+  /// @param lis The collection of all live intervals in this function.
+  /// @param vrm Map of virtual registers to physical registers for this
+  ///            function.  If NULL, no virtual register map updates will
+  ///            be done.  This could be the case if called before Regalloc.
+  /// @param deadRemats The collection of all the instructions defining an
+  ///                   original reg and are dead after remat.
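+  ///
+  /// A typical instantiation looks like this (a sketch; ParentLI, NewRegs,
+  /// MF, LIS and VRM are illustrative names assumed to be in scope, and the
+  /// caller implements LiveRangeEdit::Delegate):
+  ///
+  ///   LiveRangeEdit Edit(&ParentLI, NewRegs, MF, LIS, &VRM, this);
+  ///   unsigned NewReg = Edit.createFrom(Edit.getReg());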
+  LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
+                MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
+                Delegate *delegate = nullptr,
+                SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
+      : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
+        VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
+        FirstNew(newRegs.size()), DeadRemats(deadRemats) {
+    MRI.setDelegate(this);
+  }
+
+  ~LiveRangeEdit() override { MRI.resetDelegate(this); }
+
+  LiveInterval &getParent() const {
+    assert(Parent && "No parent LiveInterval");
+    return *Parent;
+  }
+
+  unsigned getReg() const { return getParent().reg; }
+
+  /// Iterator for accessing the new registers added by this edit.
+  using iterator = SmallVectorImpl<unsigned>::const_iterator;
+  iterator begin() const { return NewRegs.begin() + FirstNew; }
+  iterator end() const { return NewRegs.end(); }
+  unsigned size() const { return NewRegs.size() - FirstNew; }
+  bool empty() const { return size() == 0; }
+  unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
+
+  /// pop_back - It allows LiveRangeEdit users to drop new registers.
+  /// The context: when the original def instruction of a register is
+  /// dead after rematerialization, we still want to keep it for later
+  /// rematerializations. We save the def instruction in DeadRemats,
+  /// and replace the original dst register with a new dummy register so
+  /// that the live range of the original dst register can be shrunk
+  /// normally. We don't want to allocate a phys register for the dummy
+  /// register, so we want to drop it from the NewRegs set.
+  void pop_back() { NewRegs.pop_back(); }
+
+  ArrayRef<unsigned> regs() const {
+    return makeArrayRef(NewRegs).slice(FirstNew);
+  }
+
+  /// createFrom - Create a new virtual register based on OldReg.
+  unsigned createFrom(unsigned OldReg);
+
+  /// create - Create a new register with the same class and original slot as
+  /// parent.
+  LiveInterval &createEmptyInterval() {
+    return createEmptyIntervalFrom(getReg(), true);
+  }
+
+  unsigned create() { return createFrom(getReg()); }
+
+  /// anyRematerializable - Return true if any parent values may be
+  /// rematerializable.
+  /// This function must be called before any rematerialization is attempted.
+  bool anyRematerializable(AliasAnalysis *);
+
+  /// checkRematerializable - Manually add VNI to the list of rematerializable
+  /// values if DefMI may be rematerializable.
+  bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
+                             AliasAnalysis *);
+
+  /// Remat - Information needed to rematerialize at a specific location.
+  struct Remat {
+    VNInfo *ParentVNI;              // Parent's value at the remat location.
+    MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
+                                    // the real expr for remat.
+
+    explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
+  };
+
+  /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
+  /// UseIdx. It is assumed that Parent.getVNInfoAt(UseIdx) == ParentVNI.
+  /// When cheapAsAMove is set, only cheap remats are allowed.
+  bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx,
+                          bool cheapAsAMove);
+
+  /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
+  /// instruction into MBB before MI. The new instruction is mapped, but
+  /// liveness is not updated.
+  /// Return the SlotIndex of the new instruction.
+  SlotIndex rematerializeAt(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI, unsigned DestReg,
+                            const Remat &RM, const TargetRegisterInfo &,
+                            bool Late = false);
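+
+  /// A typical query/insert sequence (an illustrative sketch, not lifted from
+  /// a specific in-tree pass):
+  /// \code
+  ///   LiveRangeEdit::Remat RM(ParentVNI);
+  ///   if (Edit.canRematerializeAt(RM, OrigVNI, UseIdx, /*cheapAsAMove=*/true)) {
+  ///     SlotIndex DefIdx = Edit.rematerializeAt(MBB, MI, NewReg, RM, TRI);
+  ///     (void)DefIdx; // liveness at DefIdx must still be updated by the caller
+  ///   }
+  /// \endcode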
+
+  /// markRematerialized - Explicitly mark a value as rematerialized after
+  /// doing it manually.
+  void markRematerialized(const VNInfo *ParentVNI) {
+    Rematted.insert(ParentVNI);
+  }
+
+  /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
+  bool didRematerialize(const VNInfo *ParentVNI) const {
+    return Rematted.count(ParentVNI);
+  }
+
+  /// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
+  /// to erase it from LIS.
+  void eraseVirtReg(unsigned Reg);
+
+  /// eliminateDeadDefs - Try to delete machine instructions that are now dead
+  /// (allDefsAreDead returns true). This may cause live intervals to be trimmed
+  /// and further dead defs to be eliminated.
+  /// RegsBeingSpilled lists registers currently being spilled by the register
+  /// allocator.  These registers should not be split into new intervals
+  /// as currently those new intervals are not guaranteed to spill.
+  void eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,
+                         ArrayRef<unsigned> RegsBeingSpilled = None,
+                         AliasAnalysis *AA = nullptr);
+
+  /// calculateRegClassAndHint - Recompute register class and hint for each new
+  /// register.
+  void calculateRegClassAndHint(MachineFunction &, const MachineLoopInfo &,
+                                const MachineBlockFrequencyInfo &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVERANGEEDIT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h b/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h
new file mode 100644
index 0000000..f62a55c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRegMatrix.h
@@ -0,0 +1,160 @@
+//===- LiveRegMatrix.h - Track register interference -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRegMatrix analysis pass keeps track of virtual register interference
+// along two dimensions: Slot indexes and register units. The matrix is used by
+// register allocators to ensure that no interfering virtual registers get
+// assigned to overlapping physical registers.
+//
+// Register units are defined in MCRegisterInfo.h; they represent the smallest
+// unit of interference when dealing with overlapping physical registers. The
+// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
+// a virtual register is assigned to a physical register, the live range for
+// the virtual register is inserted into the LiveIntervalUnion for each regunit
+// in the physreg.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
+#define LLVM_CODEGEN_LIVEREGMATRIX_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <memory>
+
+namespace llvm {
+
+class AnalysisUsage;
+class LiveInterval;
+class LiveIntervals;
+class MachineFunction;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRegMatrix : public MachineFunctionPass {
+  const TargetRegisterInfo *TRI;
+  LiveIntervals *LIS;
+  VirtRegMap *VRM;
+
+  // UserTag changes whenever virtual registers have been modified.
+  unsigned UserTag = 0;
+
+  // The matrix is represented as a LiveIntervalUnion per register unit.
+  LiveIntervalUnion::Allocator LIUAlloc;
+  LiveIntervalUnion::Array Matrix;
+
+  // Cached queries per register unit.
+  std::unique_ptr<LiveIntervalUnion::Query[]> Queries;
+
+  // Cached register mask interference info.
+  unsigned RegMaskTag = 0;
+  unsigned RegMaskVirtReg = 0;
+  BitVector RegMaskUsable;
+
+  // MachineFunctionPass boilerplate.
+  void getAnalysisUsage(AnalysisUsage &) const override;
+  bool runOnMachineFunction(MachineFunction &) override;
+  void releaseMemory() override;
+
+public:
+  static char ID;
+
+  LiveRegMatrix();
+
+  //===--------------------------------------------------------------------===//
+  // High-level interface.
+  //===--------------------------------------------------------------------===//
+  //
+  // Check for interference before assigning virtual registers to physical
+  // registers.
+  //
+
+  /// Invalidate cached interference queries after modifying virtual register
+  /// live ranges. Interference checks may return stale information unless
+  /// caches are invalidated.
+  void invalidateVirtRegs() { ++UserTag; }
+
+  enum InterferenceKind {
+    /// No interference, go ahead and assign.
+    IK_Free = 0,
+
+    /// Virtual register interference. There are interfering virtual registers
+    /// assigned to PhysReg or its aliases. This interference could be resolved
+    /// by unassigning those other virtual registers.
+    IK_VirtReg,
+
+    /// Register unit interference. A fixed live range is in the way, typically
+    /// argument registers for a call. This can't be resolved by unassigning
+    /// other virtual registers.
+    IK_RegUnit,
+
+    /// RegMask interference. The live range is crossing an instruction with a
+    /// regmask operand that doesn't preserve PhysReg. This typically means
+    /// VirtReg is live across a call, and PhysReg isn't call-preserved.
+    IK_RegMask
+  };
+
+  /// Check for interference before assigning VirtReg to PhysReg.
+  /// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
+  /// When there is more than one kind of interference, the InterferenceKind
+  /// with the highest enum value is returned.
+  InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Check for interference in the segment [Start, End) that may prevent
+  /// assignment to PhysReg. If this function returns true, there is
+  /// interference in the segment [Start, End) of some other interval already
+  /// assigned to PhysReg. If this function returns false, PhysReg is free at
+  /// the segment [Start, End).
+  bool checkInterference(SlotIndex Start, SlotIndex End, unsigned PhysReg);
+
+  /// Assign VirtReg to PhysReg.
+  /// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
+  /// update VirtRegMap. The live range is expected to be available in PhysReg.
+  void assign(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Unassign VirtReg from its PhysReg.
+  /// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
+  /// the assignment and updates VirtRegMap accordingly.
+  void unassign(LiveInterval &VirtReg);
+
+  /// Returns true if the given \p PhysReg has any live intervals assigned.
+  bool isPhysRegUsed(unsigned PhysReg) const;
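+
+  /// A minimal assignment attempt, sketched (assumes VirtReg and a candidate
+  /// PhysReg are in scope and live ranges are up to date):
+  /// \code
+  ///   Matrix->invalidateVirtRegs(); // after any live range changes
+  ///   if (Matrix->checkInterference(VirtReg, PhysReg) == LiveRegMatrix::IK_Free)
+  ///     Matrix->assign(VirtReg, PhysReg);
+  /// \endcode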
+
+  //===--------------------------------------------------------------------===//
+  // Low-level interface.
+  //===--------------------------------------------------------------------===//
+  //
+  // Provide access to the underlying LiveIntervalUnions.
+  //
+
+  /// Check for regmask interference only.
+  /// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
+  /// If PhysReg is null, check if VirtReg crosses any regmask operands.
+  bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
+
+  /// Check for regunit interference only.
+  /// Return true if VirtReg overlaps a fixed assignment of one of PhysReg's
+  /// register units.
+  bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+  /// Query a line of the assigned virtual register matrix directly.
+  /// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
+  /// This returns a reference to an internal Query data structure that is only
+  /// valid until the next query() call.
+  LiveIntervalUnion::Query &query(const LiveRange &LR, unsigned RegUnit);
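+
+  /// e.g. (sketch), walking the interference on every unit of a PhysReg:
+  /// \code
+  ///   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+  ///     if (Matrix->query(LR, *Units).checkInterference())
+  ///       return true;
+  /// \endcode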
+
+  /// Directly access the live interval unions per regunit.
+  /// This returns an array indexed by the regunit number.
+  LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
new file mode 100644
index 0000000..dc4956d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveRegUnits.h
@@ -0,0 +1,135 @@
+//===- llvm/CodeGen/LiveRegUnits.h - Register Unit Set ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// A set of register units. It is intended for register liveness tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGUNITS_H
+#define LLVM_CODEGEN_LIVEREGUNITS_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cstdint>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineBasicBlock;
+
+/// A set of register units used to track register liveness.
+class LiveRegUnits {
+  const TargetRegisterInfo *TRI = nullptr;
+  BitVector Units;
+
+public:
+  /// Constructs a new empty LiveRegUnits set.
+  LiveRegUnits() = default;
+
+  /// Constructs and initializes an empty LiveRegUnits set.
+  LiveRegUnits(const TargetRegisterInfo &TRI) {
+    init(TRI);
+  }
+
+  /// Initialize and clear the set.
+  void init(const TargetRegisterInfo &TRI) {
+    this->TRI = &TRI;
+    Units.reset();
+    Units.resize(TRI.getNumRegUnits());
+  }
+
+  /// Clears the set.
+  void clear() { Units.reset(); }
+
+  /// Returns true if the set is empty.
+  bool empty() const { return Units.none(); }
+
+  /// Adds register units covered by physical register \p Reg.
+  void addReg(unsigned Reg) {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
+      Units.set(*Unit);
+  }
+
+  /// \brief Adds register units covered by physical register \p Reg that are
+  /// part of the lanemask \p Mask.
+  void addRegMasked(unsigned Reg, LaneBitmask Mask) {
+    for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
+      LaneBitmask UnitMask = (*Unit).second;
+      if (UnitMask.none() || (UnitMask & Mask).any())
+        Units.set((*Unit).first);
+    }
+  }
+
+  /// Removes all register units covered by physical register \p Reg.
+  void removeReg(unsigned Reg) {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit)
+      Units.reset(*Unit);
+  }
+
+  /// Removes register units not preserved by the regmask \p RegMask.
+  /// The regmask has the same format as the one in the RegMask machine operand.
+  void removeRegsNotPreserved(const uint32_t *RegMask);
+
+  /// Adds register units not preserved by the regmask \p RegMask.
+  /// The regmask has the same format as the one in the RegMask machine operand.
+  void addRegsInMask(const uint32_t *RegMask);
+
+  /// Returns true if no part of physical register \p Reg is live.
+  bool available(unsigned Reg) const {
+    for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
+      if (Units.test(*Unit))
+        return false;
+    }
+    return true;
+  }
+
+  /// Updates liveness when stepping backwards over the instruction \p MI.
+  /// This removes all register units defined or clobbered in \p MI and then
+  /// adds the units used (as in use operands) in \p MI.
+  void stepBackward(const MachineInstr &MI);
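+
+  /// A common backward scan, sketched (live-outs of \p MBB seed the set, and
+  /// after each step the set describes liveness just before the instruction):
+  /// \code
+  ///   LiveRegUnits Units(*TRI);
+  ///   Units.addLiveOuts(MBB);
+  ///   for (MachineInstr &MI : llvm::reverse(MBB))
+  ///     Units.stepBackward(MI);
+  /// \endcode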
+
+  /// Adds all register units used, defined or clobbered in \p MI.
+  /// This is useful when walking over a range of instructions to find registers
+  /// unused over the whole range.
+  void accumulate(const MachineInstr &MI);
+
+  /// Adds registers living out of block \p MBB.
+  /// Live out registers are the union of the live-in registers of the successor
+  /// blocks and pristine registers. Live out registers of the end block are the
+  /// callee saved registers.
+  void addLiveOuts(const MachineBasicBlock &MBB);
+
+  /// Adds registers living into block \p MBB.
+  void addLiveIns(const MachineBasicBlock &MBB);
+
+  /// Adds all register units marked in the bitvector \p RegUnits.
+  void addUnits(const BitVector &RegUnits) {
+    Units |= RegUnits;
+  }
+  /// Removes all register units marked in the bitvector \p RegUnits.
+  void removeUnits(const BitVector &RegUnits) {
+    Units.reset(RegUnits);
+  }
+  /// Return the internal bitvector representation of the set.
+  const BitVector &getBitVector() const {
+    return Units;
+  }
+
+private:
+  /// Adds pristine registers. Pristine registers are callee saved registers
+  /// that are unused in the function.
+  void addPristines(const MachineFunction &MF);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGUNITS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h b/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h
new file mode 100644
index 0000000..44ed785
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveStacks.h
@@ -0,0 +1,103 @@
+//===- LiveStacks.h - Live Stack Slot Analysis ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the live stack slot analysis pass. It is analogous to
+// live interval analysis except it's analyzing liveness of stack slots rather
+// than registers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVESTACKS_H
+#define LLVM_CODEGEN_LIVESTACKS_H
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Pass.h"
+#include <cassert>
+#include <map>
+#include <unordered_map>
+
+namespace llvm {
+
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+class LiveStacks : public MachineFunctionPass {
+  const TargetRegisterInfo *TRI;
+
+  /// Special pool allocator for VNInfo's (LiveInterval val#).
+  ///
+  VNInfo::Allocator VNInfoAllocator;
+
+  /// S2IMap - Stack slot indices to live interval mapping.
+  using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
+  SS2IntervalMap S2IMap;
+
+  /// S2RCMap - Stack slot indices to register class mapping.
+  std::map<int, const TargetRegisterClass *> S2RCMap;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  LiveStacks() : MachineFunctionPass(ID) {
+    initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+  }
+
+  using iterator = SS2IntervalMap::iterator;
+  using const_iterator = SS2IntervalMap::const_iterator;
+
+  const_iterator begin() const { return S2IMap.begin(); }
+  const_iterator end() const { return S2IMap.end(); }
+  iterator begin() { return S2IMap.begin(); }
+  iterator end() { return S2IMap.end(); }
+
+  unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
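+
+  /// e.g. (sketch, where LS is the LiveStacks analysis result): dump every
+  /// live stack slot interval:
+  /// \code
+  ///   for (LiveStacks::const_iterator I = LS.begin(), E = LS.end(); I != E; ++I)
+  ///     dbgs() << "SS#" << I->first << ": " << I->second << '\n';
+  /// \endcode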
+
+  LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);
+
+  LiveInterval &getInterval(int Slot) {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    SS2IntervalMap::iterator I = S2IMap.find(Slot);
+    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+    return I->second;
+  }
+
+  const LiveInterval &getInterval(int Slot) const {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
+    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+    return I->second;
+  }
+
+  bool hasInterval(int Slot) const { return S2IMap.count(Slot); }
+
+  const TargetRegisterClass *getIntervalRegClass(int Slot) const {
+    assert(Slot >= 0 && "Spill slot indice must be >= 0");
+    std::map<int, const TargetRegisterClass *>::const_iterator I =
+        S2RCMap.find(Slot);
+    assert(I != S2RCMap.end() &&
+           "Register class info does not exist for stack slot");
+    return I->second;
+  }
+
+  VNInfo::Allocator &getVNInfoAllocator() { return VNInfoAllocator; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void releaseMemory() override;
+
+  /// runOnMachineFunction - pass entry point
+  bool runOnMachineFunction(MachineFunction &) override;
+
+  /// print - Implement the dump method.
+  void print(raw_ostream &O, const Module * = nullptr) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h b/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h
new file mode 100644
index 0000000..ed8da86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LiveVariables.h
@@ -0,0 +1,309 @@
+//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveVariables analysis pass.  For each machine
+// instruction in the function, this pass calculates the set of registers that
+// are immediately dead after the instruction (i.e., the instruction calculates
+// the value, but it is never used) and the set of registers that are used by
+// the instruction, but are never used after the instruction (i.e., they are
+// killed).
+//
+// This class computes live variables using a sparse implementation based on
+// the machine code SSA form.  This class computes live variable information for
+// each virtual and _register allocatable_ physical register in a function.  It
+// uses the dominance properties of SSA form to efficiently compute live
+// variables for virtual registers, and assumes that physical registers are only
+// live within a single basic block (allowing it to do a single local analysis
+// to resolve physical register lifetimes in each basic block).  If a physical
+// register is not register allocatable, it is not tracked.  This is useful for
+// things like the stack pointer and condition codes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
+#define LLVM_CODEGEN_LIVEVARIABLES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineRegisterInfo;
+
+class LiveVariables : public MachineFunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  LiveVariables() : MachineFunctionPass(ID) {
+    initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
+  }
+
+  /// VarInfo - This represents the regions where a virtual register is live in
+  /// the program.  We represent this with three different pieces of
+  /// information: the set of blocks in which the instruction is live
+  /// throughout, the set of blocks in which the instruction is actually used,
+  /// and the set of non-phi instructions that are the last users of the value.
+  ///
+  /// In the common case where a value is defined and killed in the same block,
+  /// there is one killing instruction, and AliveBlocks is empty.
+  ///
+  /// Otherwise, the value is live out of the block.  If the value is live
+  /// throughout any blocks, these blocks are listed in AliveBlocks.  Blocks
+  /// where the liveness range ends are not included in AliveBlocks, instead
+  /// being captured by the Kills set.  In these blocks, the value is live into
+  /// the block (unless the value is defined and killed in the same block) and
+  /// lives until the specified instruction.  Note that there cannot ever be a
+  /// value whose Kills set contains two instructions from the same basic block.
+  ///
+  /// PHI nodes complicate things a bit.  If a PHI node is the last user of a
+  /// value in one of its predecessor blocks, it is not listed in the kills set,
+  /// but does include the predecessor block in the AliveBlocks set (unless that
+  /// block also defines the value).  This leads to the (perfectly sensical)
+  /// situation where a value is defined in a block, and the last use is a phi
+  /// node in the successor.  In this case, AliveBlocks is empty (the value is
+  /// not live across any blocks) and Kills is empty (phi nodes are not
+  /// included). This is sensical because the value must be live to the end of
+  /// the block, but is not live in any successor blocks.
+  struct VarInfo {
+    /// AliveBlocks - Set of blocks in which this value is alive completely
+    /// through.  This is a bit set which uses the basic block number as an
+    /// index.
+    ///
+    SparseBitVector<> AliveBlocks;
+
+    /// Kills - List of MachineInstrs which are the last use of this
+    /// virtual register (they kill it) in their basic block.
+    ///
+    std::vector<MachineInstr*> Kills;
+
+    /// removeKill - Delete a kill corresponding to the specified
+    /// machine instruction. Returns true if there was a kill
+    /// corresponding to this instruction, false otherwise.
+    bool removeKill(MachineInstr &MI) {
+      std::vector<MachineInstr *>::iterator I = find(Kills, &MI);
+      if (I == Kills.end())
+        return false;
+      Kills.erase(I);
+      return true;
+    }
+
+    /// findKill - Find a kill instruction in MBB. Return NULL if none is found.
+    MachineInstr *findKill(const MachineBasicBlock *MBB) const;
+
+    /// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
+    /// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
+    /// MBB, it is not considered live in.
+    bool isLiveIn(const MachineBasicBlock &MBB,
+                  unsigned Reg,
+                  MachineRegisterInfo &MRI);
+
+    void dump() const;
+  };
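+
+  /// e.g. (sketch, where LV is a LiveVariables pointer obtained from the pass
+  /// manager): query kill information for a virtual register:
+  /// \code
+  ///   LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
+  ///   if (MachineInstr *KillMI = VI.findKill(&MBB))
+  ///     KillMI->dump(); // Reg dies here inside MBB
+  /// \endcode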
+
+private:
+  /// VirtRegInfo - This list is a mapping from virtual register number to
+  /// variable information.
+  ///
+  IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
+
+  /// PHIJoins - list of virtual registers that are PHI joins. These registers
+  /// may have multiple definitions, and they require special handling when
+  /// building live intervals.
+  SparseBitVector<> PHIJoins;
+
+private:   // Intermediate data structures
+  MachineFunction *MF;
+
+  MachineRegisterInfo* MRI;
+
+  const TargetRegisterInfo *TRI;
+
+  // PhysRegDef - Keep track of which instruction was the last def of a
+  // physical register. This is a purely local property, because all physical
+  // register references are presumed dead across basic blocks.
+  std::vector<MachineInstr *> PhysRegDef;
+
+  // PhysRegUse - Keep track of which instruction was the last use of a
+  // physical register. This is a purely local property, because all physical
+  // register references are presumed dead across basic blocks.
+  std::vector<MachineInstr *> PhysRegUse;
+
+  std::vector<SmallVector<unsigned, 4>> PHIVarInfo;
+
+  // DistanceMap - Keep track of the distance of an MI from the start of the
+  // current basic block.
+  DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+  /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
+  /// uses. Pay special attention to the sub-register uses which may come below
+  /// the last use of the whole register.
+  bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
+
+  /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
+  void HandleRegMask(const MachineOperand&);
+
+  void HandlePhysRegUse(unsigned Reg, MachineInstr &MI);
+  void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
+                        SmallVectorImpl<unsigned> &Defs);
+  void UpdatePhysRegDefs(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
+
+  /// FindLastRefOrPartRef - Return the last reference or partial reference of
+  /// the specified register.
+  MachineInstr *FindLastRefOrPartRef(unsigned Reg);
+
+  /// FindLastPartialDef - Return the last partial def of the specified
+  /// register. Also returns the sub-registers that are defined by the
+  /// instruction.
+  MachineInstr *FindLastPartialDef(unsigned Reg,
+                                   SmallSet<unsigned,4> &PartDefRegs);
+
+  /// analyzePHINodes - Gather information about the PHI nodes in here. In
+  /// particular, we want to map the variable information of a virtual
+  /// register which is used in a PHI node. We map that to the BB the vreg
+  /// is coming from.
+  void analyzePHINodes(const MachineFunction& Fn);
+
+  void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
+
+  void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
+public:
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  /// RegisterDefIsDead - Return true if the specified instruction defines the
+  /// specified register, but that definition is dead.
+  bool RegisterDefIsDead(MachineInstr &MI, unsigned Reg) const;
+
+  //===--------------------------------------------------------------------===//
+  //  API to update live variable information
+
+  /// replaceKillInstruction - Update register kill info by replacing a kill
+  /// instruction with a new one.
+  void replaceKillInstruction(unsigned Reg, MachineInstr &OldMI,
+                              MachineInstr &NewMI);
+
+  /// addVirtualRegisterKilled - Add information about the fact that the
+  /// specified register is killed after being used by the specified
+  /// instruction. If AddIfNotFound is true, add an implicit operand if it's
+  /// not found.
+  void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr &MI,
+                                bool AddIfNotFound = false) {
+    if (MI.addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
+      getVarInfo(IncomingReg).Kills.push_back(&MI);
+  }
+
+  /// removeVirtualRegisterKilled - Remove the specified kill of the virtual
+  /// register from the live variable information. Returns true if the
+  /// variable was marked as killed by the specified instruction,
+  /// false otherwise.
+  bool removeVirtualRegisterKilled(unsigned reg, MachineInstr &MI) {
+    if (!getVarInfo(reg).removeKill(MI))
+      return false;
+
+    bool Removed = false;
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI.getOperand(i);
+      if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
+        MO.setIsKill(false);
+        Removed = true;
+        break;
+      }
+    }
+
+    assert(Removed && "Register is not used by this instruction!");
+    (void)Removed;
+    return true;
+  }
+
+  /// removeVirtualRegistersKilled - Remove all killed info for the specified
+  /// instruction.
+  void removeVirtualRegistersKilled(MachineInstr &MI);
+
+  /// addVirtualRegisterDead - Add information about the fact that the specified
+  /// register is dead after being used by the specified instruction. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found.
+  void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr &MI,
+                              bool AddIfNotFound = false) {
+    if (MI.addRegisterDead(IncomingReg, TRI, AddIfNotFound))
+      getVarInfo(IncomingReg).Kills.push_back(&MI);
+  }
+
+  /// removeVirtualRegisterDead - Remove the specified kill of the virtual
+  /// register from the live variable information. Returns true if the
+  /// variable was marked dead at the specified instruction, false
+  /// otherwise.
+  bool removeVirtualRegisterDead(unsigned reg, MachineInstr &MI) {
+    if (!getVarInfo(reg).removeKill(MI))
+      return false;
+
+    bool Removed = false;
+    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI.getOperand(i);
+      if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
+        MO.setIsDead(false);
+        Removed = true;
+        break;
+      }
+    }
+    assert(Removed && "Register is not defined by this instruction!");
+    (void)Removed;
+    return true;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  void releaseMemory() override {
+    VirtRegInfo.clear();
+  }
+
+  /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
+  /// register.
+  VarInfo &getVarInfo(unsigned RegIdx);
+
+  void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+                               MachineBasicBlock *BB);
+  void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+                               MachineBasicBlock *BB,
+                               std::vector<MachineBasicBlock*> &WorkList);
+  void HandleVirtRegDef(unsigned reg, MachineInstr &MI);
+  void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB, MachineInstr &MI);
+
+  bool isLiveIn(unsigned Reg, const MachineBasicBlock &MBB) {
+    return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
+  }
+
+  /// isLiveOut - Determine if Reg is live out from MBB, when not considering
+  /// PHI nodes. This means that Reg is either killed by a successor block or
+  /// passed through one.
+  bool isLiveOut(unsigned Reg, const MachineBasicBlock &MBB);
+
+  /// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
+  /// variables that are live out of DomBB and live into SuccBB will be marked
+  /// as passing live through BB. This method assumes that the machine code is
+  /// still in SSA form.
+  void addNewBlock(MachineBasicBlock *BB,
+                   MachineBasicBlock *DomBB,
+                   MachineBasicBlock *SuccBB);
+
+  /// isPHIJoin - Return true if Reg is a phi join register.
+  bool isPHIJoin(unsigned Reg) { return PHIJoins.test(Reg); }
+
+  /// setPHIJoin - Mark Reg as a phi join register.
+  void setPHIJoin(unsigned Reg) { PHIJoins.set(Reg); }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
new file mode 100644
index 0000000..a816f6d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LoopTraversal.h
@@ -0,0 +1,116 @@
+//==------ llvm/CodeGen/LoopTraversal.h - Loop Traversal -*- C++ -*---------==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Loop Traversal logic.
+///
+/// This class provides the basic blocks traversal order used by passes like
+/// ReachingDefAnalysis and ExecutionDomainFix.
+/// It identifies basic blocks that are part of loops and should be visited
+/// twice, and returns an efficient traversal order for all the blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LOOPTRAVERSAL_H
+#define LLVM_CODEGEN_LOOPTRAVERSAL_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+
+/// This class provides the basic blocks traversal order used by passes like
+/// ReachingDefAnalysis and ExecutionDomainFix.
+/// It identifies basic blocks that are part of loops and should be visited
+/// twice, and returns an efficient traversal order for all the blocks.
+///
+/// We want to visit every instruction in every basic block in order to update
+/// its execution domain or collect clearance information. However, for the
+/// clearance calculation, we need to know clearances from all predecessors
+/// (including any backedges), therefore we need to visit some blocks twice.
+/// As an example, consider the following loop.
+///
+///
+///    PH -> A -> B (xmm<Undef> -> xmm<Def>) -> C -> D -> EXIT
+///          ^                                  |
+///          +----------------------------------+
+///
+/// The iteration order this pass will return is as follows:
+/// Optimized: PH A B C A' B' C' D
+///
+/// The basic block order is constructed as follows:
+/// Once we finish processing some block, we update the counters in MBBInfos
+/// and re-process any successors that are now 'done'.
+/// We call a block that is ready for its final round of processing `done`
+/// (isBlockDone), e.g. when all predecessor information is known.
+///
+/// Note that a naive traversal order would be to do two complete passes over
+/// all basic blocks/instructions, the first for recording clearances, the
+/// second for updating clearance based on backedges.
+/// However, for functions without backedges, or functions with a lot of
+/// straight-line code, and a small loop, that would be a lot of unnecessary
+/// work (since only the BBs that are part of the loop require two passes).
+///
+/// E.g., the naive iteration order for the above example is as follows:
+/// Naive: PH A B C D A' B' C' D'
+///
+/// In the optimized approach we avoid processing D twice, because we
+/// can entirely process the predecessors before getting to D.
+class LoopTraversal {
+private:
+  struct MBBInfo {
+    /// Whether we have gotten to this block in primary processing yet.
+    bool PrimaryCompleted = false;
+
+    /// The number of predecessors for which primary processing has completed.
+    unsigned IncomingProcessed = 0;
+
+    /// The value of `IncomingProcessed` at the start of primary processing.
+    unsigned PrimaryIncoming = 0;
+
+    /// The number of predecessors for which all processing steps are done.
+    unsigned IncomingCompleted = 0;
+
+    MBBInfo() = default;
+  };
+  using MBBInfoMap = SmallVector<MBBInfo, 4>;
+  /// Helps keep track of whether we processed this block and all its predecessors.
+  MBBInfoMap MBBInfos;
+
+public:
+  struct TraversedMBBInfo {
+    /// The basic block.
+    MachineBasicBlock *MBB = nullptr;
+
+    /// True if this is the first time we process the basic block.
+    bool PrimaryPass = true;
+
+    /// True if the block is ready for its final round of processing.
+    bool IsDone = true;
+
+    TraversedMBBInfo(MachineBasicBlock *BB = nullptr, bool Primary = true,
+                     bool Done = true)
+        : MBB(BB), PrimaryPass(Primary), IsDone(Done) {}
+  };
+  LoopTraversal() {}
+
+  /// \brief Identifies basic blocks that are part of loops and should be
+  ///  visited twice, and returns an efficient traversal order for all the
+  ///  blocks.
+  using TraversalOrder = SmallVector<TraversedMBBInfo, 4>;
+  TraversalOrder traverse(MachineFunction &MF);
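+
+  /// Typical driver loop, sketched (processPrimary/processDone are
+  /// hypothetical hooks standing in for a pass's own per-block work):
+  /// \code
+  ///   LoopTraversal Traversal;
+  ///   LoopTraversal::TraversalOrder Order = Traversal.traverse(MF);
+  ///   for (const LoopTraversal::TraversedMBBInfo &T : Order) {
+  ///     if (T.PrimaryPass)
+  ///       processPrimary(T.MBB); // first visit of the block
+  ///     if (T.IsDone)
+  ///       processDone(T.MBB);    // all predecessor info is now known
+  ///   }
+  /// \endcode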
+
+private:
+  /// Returns true if the block is ready for its final round of processing.
+  bool isBlockDone(MachineBasicBlock *MBB);
+};
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_LOOPTRAVERSAL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h b/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h
new file mode 100644
index 0000000..a3c5c93
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/LowLevelType.h
@@ -0,0 +1,32 @@
+//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Implement a low-level type suitable for MachineInstr level instruction
+/// selection.
+///
+/// This provides the CodeGen aspects of LowLevelType, such as Type conversion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
+#define LLVM_CODEGEN_LOWLEVELTYPE_H
+
+#include "llvm/Support/LowLevelTypeImpl.h"
+
+namespace llvm {
+
+class DataLayout;
+class Type;
+
+/// Construct a low-level type based on an LLVM type.
+LLT getLLTForType(Type &Ty, const DataLayout &DL);
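+
+/// e.g. (sketch): a 32-bit integer type maps to the scalar LLT s32:
+/// \code
+///   LLT Ty = getLLTForType(*Type::getInt32Ty(Ctx), DL); // LLT::scalar(32)
+/// \endcode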
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
new file mode 100644
index 0000000..b631a8c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRParser/MIRParser.h
@@ -0,0 +1,81 @@
+//===- MIRParser.h - MIR serialization format parser ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This MIR serialization library is currently a work in progress. It can't
+// serialize machine functions at this time.
+//
+// This file declares the functions that parse the MIR serialization format
+// files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+#define LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class StringRef;
+class MIRParserImpl;
+class MachineModuleInfo;
+class SMDiagnostic;
+
+/// This class initializes machine functions by applying the state loaded from
+/// a MIR file.
+class MIRParser {
+  std::unique_ptr<MIRParserImpl> Impl;
+
+public:
+  MIRParser(std::unique_ptr<MIRParserImpl> Impl);
+  MIRParser(const MIRParser &) = delete;
+  ~MIRParser();
+
+  /// Parses the optional LLVM IR module in the MIR file.
+  ///
+  /// A new, empty module is created if the LLVM IR isn't present.
+  /// \returns nullptr if a parsing error occurred.
+  std::unique_ptr<Module> parseIRModule();
+
+  /// \brief Parses MachineFunctions in the MIR file and adds them to the given
+  /// MachineModuleInfo \p MMI.
+  ///
+  /// \returns true if an error occurred.
+  bool parseMachineFunctions(Module &M, MachineModuleInfo &MMI);
+};
+
+/// This function is the main interface to the MIR serialization format parser.
+///
+/// It reads in a MIR file and returns a MIR parser that can parse the embedded
+/// LLVM IR module and initialize the machine functions by parsing the machine
+/// function's state.
+///
+/// \param Filename - The name of the file to parse.
+/// \param Error - Error result info.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser> createMIRParserFromFile(StringRef Filename,
+                                                   SMDiagnostic &Error,
+                                                   LLVMContext &Context);
+
+/// This function is another interface to the MIR serialization format parser.
+///
+/// It returns a MIR parser that works with the given memory buffer and that can
+/// parse the embedded LLVM IR module and initialize the machine functions by
+/// parsing the machine function's state.
+///
+/// \param Contents - The MemoryBuffer containing the machine level IR.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser>
+createMIRParser(std::unique_ptr<MemoryBuffer> Contents, LLVMContext &Context);
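+
+/// Example driver flow, sketched (TM is assumed to be an LLVMTargetMachine
+/// created elsewhere; error handling elided):
+/// \code
+///   LLVMContext Context;
+///   SMDiagnostic Err;
+///   std::unique_ptr<MIRParser> Parser =
+///       createMIRParserFromFile("input.mir", Err, Context);
+///   if (std::unique_ptr<Module> M = Parser->parseIRModule()) {
+///     MachineModuleInfo MMI(&TM);
+///     Parser->parseMachineFunctions(*M, MMI);
+///   }
+/// \endcode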
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
new file mode 100644
index 0000000..c73adc3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRPrinter.h
@@ -0,0 +1,46 @@
+//===- MIRPrinter.h - MIR serialization format printer ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the functions that print out the LLVM IR and the machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_MIRPRINTER_H
+#define LLVM_LIB_CODEGEN_MIRPRINTER_H
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class Module;
+class raw_ostream;
+template <typename T> class SmallVectorImpl;
+
+/// Print LLVM IR using the MIR serialization format to the given output stream.
+void printMIR(raw_ostream &OS, const Module &M);
+
+/// Print a machine function using the MIR serialization format to the given
+/// output stream.
+void printMIR(raw_ostream &OS, const MachineFunction &MF);
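+
+/// e.g. (sketch): both overloads write MIR text to a stream:
+/// \code
+///   printMIR(outs(), M);  // the embedded LLVM IR module
+///   printMIR(outs(), MF); // one machine function body
+/// \endcode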
+
+/// Determine a possible list of successors of a basic block based on the
+/// basic block machine operand being used inside the block. This should give
+/// you the correct list of successor blocks in most cases except for things
+/// like jump tables where the basic block references can't easily be found.
+/// The MIRPrinter will skip printing successors if they match the result of
+/// this function, and the parser will use this function to construct a list if
+/// it is missing.
+void guessSuccessors(const MachineBasicBlock &MBB,
+                     SmallVectorImpl<MachineBasicBlock*> &Successors,
+                     bool &IsFallthrough);
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
new file mode 100644
index 0000000..b75f9c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MIRYamlMapping.h
@@ -0,0 +1,523 @@
+//===- MIRYamlMapping.h - Describes the mapping between MIR and YAML -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the mapping between various MIR data structures and
+// their corresponding YAML representation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MIRYAMLMAPPING_H
+#define LLVM_CODEGEN_MIRYAMLMAPPING_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace yaml {
+
+/// A wrapper around std::string which contains a source range that's being
+/// set during parsing.
+struct StringValue {
+  std::string Value;
+  SMRange SourceRange;
+
+  StringValue() = default;
+  StringValue(std::string Value) : Value(std::move(Value)) {}
+
+  bool operator==(const StringValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct ScalarTraits<StringValue> {
+  static void output(const StringValue &S, void *, raw_ostream &OS) {
+    OS << S.Value;
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, StringValue &S) {
+    S.Value = Scalar.str();
+    if (const auto *Node =
+            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+      S.SourceRange = Node->getSourceRange();
+    return "";
+  }
+
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+struct FlowStringValue : StringValue {
+  FlowStringValue() = default;
+  FlowStringValue(std::string Value) : StringValue(std::move(Value)) {}
+};
+
+template <> struct ScalarTraits<FlowStringValue> {
+  static void output(const FlowStringValue &S, void *, raw_ostream &OS) {
+    return ScalarTraits<StringValue>::output(S, nullptr, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, FlowStringValue &S) {
+    return ScalarTraits<StringValue>::input(Scalar, Ctx, S);
+  }
+
+  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+struct BlockStringValue {
+  StringValue Value;
+
+  bool operator==(const BlockStringValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct BlockScalarTraits<BlockStringValue> {
+  static void output(const BlockStringValue &S, void *Ctx, raw_ostream &OS) {
+    return ScalarTraits<StringValue>::output(S.Value, Ctx, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, BlockStringValue &S) {
+    return ScalarTraits<StringValue>::input(Scalar, Ctx, S.Value);
+  }
+};
+
+/// A wrapper around unsigned which contains a source range that's being set
+/// during parsing.
+struct UnsignedValue {
+  unsigned Value = 0;
+  SMRange SourceRange;
+
+  UnsignedValue() = default;
+  UnsignedValue(unsigned Value) : Value(Value) {}
+
+  bool operator==(const UnsignedValue &Other) const {
+    return Value == Other.Value;
+  }
+};
+
+template <> struct ScalarTraits<UnsignedValue> {
+  static void output(const UnsignedValue &Value, void *Ctx, raw_ostream &OS) {
+    return ScalarTraits<unsigned>::output(Value.Value, Ctx, OS);
+  }
+
+  static StringRef input(StringRef Scalar, void *Ctx, UnsignedValue &Value) {
+    if (const auto *Node =
+            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+      Value.SourceRange = Node->getSourceRange();
+    return ScalarTraits<unsigned>::input(Scalar, Ctx, Value.Value);
+  }
+
+  static QuotingType mustQuote(StringRef Scalar) {
+    return ScalarTraits<unsigned>::mustQuote(Scalar);
+  }
+};
+
+template <> struct ScalarEnumerationTraits<MachineJumpTableInfo::JTEntryKind> {
+  static void enumeration(yaml::IO &IO,
+                          MachineJumpTableInfo::JTEntryKind &EntryKind) {
+    IO.enumCase(EntryKind, "block-address",
+                MachineJumpTableInfo::EK_BlockAddress);
+    IO.enumCase(EntryKind, "gp-rel64-block-address",
+                MachineJumpTableInfo::EK_GPRel64BlockAddress);
+    IO.enumCase(EntryKind, "gp-rel32-block-address",
+                MachineJumpTableInfo::EK_GPRel32BlockAddress);
+    IO.enumCase(EntryKind, "label-difference32",
+                MachineJumpTableInfo::EK_LabelDifference32);
+    IO.enumCase(EntryKind, "inline", MachineJumpTableInfo::EK_Inline);
+    IO.enumCase(EntryKind, "custom32", MachineJumpTableInfo::EK_Custom32);
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::StringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::UnsignedValue)
+
+namespace llvm {
+namespace yaml {
+
+struct VirtualRegisterDefinition {
+  UnsignedValue ID;
+  StringValue Class;
+  StringValue PreferredRegister;
+
+  // TODO: Serialize the target specific register hints.
+
+  bool operator==(const VirtualRegisterDefinition &Other) const {
+    return ID == Other.ID && Class == Other.Class &&
+           PreferredRegister == Other.PreferredRegister;
+  }
+};
+
+template <> struct MappingTraits<VirtualRegisterDefinition> {
+  static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) {
+    YamlIO.mapRequired("id", Reg.ID);
+    YamlIO.mapRequired("class", Reg.Class);
+    YamlIO.mapOptional("preferred-register", Reg.PreferredRegister,
+                       StringValue()); // Don't print out when it's empty.
+  }
+
+  static const bool flow = true;
+};
+
+struct MachineFunctionLiveIn {
+  StringValue Register;
+  StringValue VirtualRegister;
+
+  bool operator==(const MachineFunctionLiveIn &Other) const {
+    return Register == Other.Register &&
+           VirtualRegister == Other.VirtualRegister;
+  }
+};
+
+template <> struct MappingTraits<MachineFunctionLiveIn> {
+  static void mapping(IO &YamlIO, MachineFunctionLiveIn &LiveIn) {
+    YamlIO.mapRequired("reg", LiveIn.Register);
+    YamlIO.mapOptional(
+        "virtual-reg", LiveIn.VirtualRegister,
+        StringValue()); // Don't print the virtual register when it's empty.
+  }
+
+  static const bool flow = true;
+};
+
+/// Serializable representation of stack object from the MachineFrameInfo class.
+///
+/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are
+/// determined by the object's type and frame information flags.
+/// Dead stack objects aren't serialized.
+///
+/// The 'isPreallocated' flag is determined by the local offset.
+struct MachineStackObject {
+  enum ObjectType { DefaultType, SpillSlot, VariableSized };
+  UnsignedValue ID;
+  StringValue Name;
+  // TODO: Serialize unnamed LLVM alloca reference.
+  ObjectType Type = DefaultType;
+  int64_t Offset = 0;
+  uint64_t Size = 0;
+  unsigned Alignment = 0;
+  uint8_t StackID = 0;
+  StringValue CalleeSavedRegister;
+  bool CalleeSavedRestored = true;
+  Optional<int64_t> LocalOffset;
+  StringValue DebugVar;
+  StringValue DebugExpr;
+  StringValue DebugLoc;
+
+  bool operator==(const MachineStackObject &Other) const {
+    return ID == Other.ID && Name == Other.Name && Type == Other.Type &&
+           Offset == Other.Offset && Size == Other.Size &&
+           Alignment == Other.Alignment &&
+           StackID == Other.StackID &&
+           CalleeSavedRegister == Other.CalleeSavedRegister &&
+           CalleeSavedRestored == Other.CalleeSavedRestored &&
+           LocalOffset == Other.LocalOffset && DebugVar == Other.DebugVar &&
+           DebugExpr == Other.DebugExpr && DebugLoc == Other.DebugLoc;
+  }
+};
+
+template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> {
+  static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) {
+    IO.enumCase(Type, "default", MachineStackObject::DefaultType);
+    IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot);
+    IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized);
+  }
+};
+
+template <> struct MappingTraits<MachineStackObject> {
+  static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) {
+    YamlIO.mapRequired("id", Object.ID);
+    YamlIO.mapOptional("name", Object.Name,
+                       StringValue()); // Don't print out an empty name.
+    YamlIO.mapOptional(
+        "type", Object.Type,
+        MachineStackObject::DefaultType); // Don't print the default type.
+    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
+    if (Object.Type != MachineStackObject::VariableSized)
+      YamlIO.mapRequired("size", Object.Size);
+    YamlIO.mapOptional("alignment", Object.Alignment, (unsigned)0);
+    YamlIO.mapOptional("stack-id", Object.StackID);
+    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
+                       true);
+    YamlIO.mapOptional("local-offset", Object.LocalOffset, Optional<int64_t>());
+    YamlIO.mapOptional("di-variable", Object.DebugVar,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("di-expression", Object.DebugExpr,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("di-location", Object.DebugLoc,
+                       StringValue()); // Don't print it out when it's empty.
+  }
+
+  static const bool flow = true;
+};
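+
+// For reference, a stack object produced by the mapping above looks roughly
+// like this in MIR YAML (field values are illustrative):
+//
+//   { id: 0, name: buf, type: spill-slot, offset: -16, size: 8, alignment: 8 }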
+
+/// Serializable representation of the fixed stack object from the
+/// MachineFrameInfo class.
+struct FixedMachineStackObject {
+  enum ObjectType { DefaultType, SpillSlot };
+  UnsignedValue ID;
+  ObjectType Type = DefaultType;
+  int64_t Offset = 0;
+  uint64_t Size = 0;
+  unsigned Alignment = 0;
+  uint8_t StackID = 0;
+  bool IsImmutable = false;
+  bool IsAliased = false;
+  StringValue CalleeSavedRegister;
+  bool CalleeSavedRestored = true;
+
+  bool operator==(const FixedMachineStackObject &Other) const {
+    return ID == Other.ID && Type == Other.Type && Offset == Other.Offset &&
+           Size == Other.Size && Alignment == Other.Alignment &&
+           StackID == Other.StackID &&
+           IsImmutable == Other.IsImmutable && IsAliased == Other.IsAliased &&
+           CalleeSavedRegister == Other.CalleeSavedRegister &&
+           CalleeSavedRestored == Other.CalleeSavedRestored;
+  }
+};
+
+template <>
+struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> {
+  static void enumeration(yaml::IO &IO,
+                          FixedMachineStackObject::ObjectType &Type) {
+    IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType);
+    IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot);
+  }
+};
+
+template <> struct MappingTraits<FixedMachineStackObject> {
+  static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) {
+    YamlIO.mapRequired("id", Object.ID);
+    YamlIO.mapOptional(
+        "type", Object.Type,
+        FixedMachineStackObject::DefaultType); // Don't print the default type.
+    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
+    YamlIO.mapOptional("size", Object.Size, (uint64_t)0);
+    YamlIO.mapOptional("alignment", Object.Alignment, (unsigned)0);
+    YamlIO.mapOptional("stack-id", Object.StackID);
+    if (Object.Type != FixedMachineStackObject::SpillSlot) {
+      YamlIO.mapOptional("isImmutable", Object.IsImmutable, false);
+      YamlIO.mapOptional("isAliased", Object.IsAliased, false);
+    }
+    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
+                       true);
+  }
+
+  static const bool flow = true;
+};
+
+struct MachineConstantPoolValue {
+  UnsignedValue ID;
+  StringValue Value;
+  unsigned Alignment = 0;
+  bool IsTargetSpecific = false;
+
+  bool operator==(const MachineConstantPoolValue &Other) const {
+    return ID == Other.ID && Value == Other.Value &&
+           Alignment == Other.Alignment &&
+           IsTargetSpecific == Other.IsTargetSpecific;
+  }
+};
+
+template <> struct MappingTraits<MachineConstantPoolValue> {
+  static void mapping(IO &YamlIO, MachineConstantPoolValue &Constant) {
+    YamlIO.mapRequired("id", Constant.ID);
+    YamlIO.mapOptional("value", Constant.Value, StringValue());
+    YamlIO.mapOptional("alignment", Constant.Alignment, (unsigned)0);
+    YamlIO.mapOptional("isTargetSpecific", Constant.IsTargetSpecific, false);
+  }
+};
+
+struct MachineJumpTable {
+  struct Entry {
+    UnsignedValue ID;
+    std::vector<FlowStringValue> Blocks;
+
+    bool operator==(const Entry &Other) const {
+      return ID == Other.ID && Blocks == Other.Blocks;
+    }
+  };
+
+  MachineJumpTableInfo::JTEntryKind Kind = MachineJumpTableInfo::EK_Custom32;
+  std::vector<Entry> Entries;
+
+  bool operator==(const MachineJumpTable &Other) const {
+    return Kind == Other.Kind && Entries == Other.Entries;
+  }
+};
+
+template <> struct MappingTraits<MachineJumpTable::Entry> {
+  static void mapping(IO &YamlIO, MachineJumpTable::Entry &Entry) {
+    YamlIO.mapRequired("id", Entry.ID);
+    YamlIO.mapOptional("blocks", Entry.Blocks, std::vector<FlowStringValue>());
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineFunctionLiveIn)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineConstantPoolValue)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineJumpTable::Entry)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<MachineJumpTable> {
+  static void mapping(IO &YamlIO, MachineJumpTable &JT) {
+    YamlIO.mapRequired("kind", JT.Kind);
+    YamlIO.mapOptional("entries", JT.Entries,
+                       std::vector<MachineJumpTable::Entry>());
+  }
+};
+
+/// Serializable representation of MachineFrameInfo.
+///
+/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and
+/// 'RealignOption' as they are determined by the target and LLVM function
+/// attributes.
+/// It also doesn't serialize attributes like 'NumFixedObjects' and
+/// 'HasVarSizedObjects' as they are determined by the frame objects themselves.
+struct MachineFrameInfo {
+  bool IsFrameAddressTaken = false;
+  bool IsReturnAddressTaken = false;
+  bool HasStackMap = false;
+  bool HasPatchPoint = false;
+  uint64_t StackSize = 0;
+  int OffsetAdjustment = 0;
+  unsigned MaxAlignment = 0;
+  bool AdjustsStack = false;
+  bool HasCalls = false;
+  StringValue StackProtector;
+  // TODO: Serialize FunctionContextIdx
+  unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
+  bool HasOpaqueSPAdjustment = false;
+  bool HasVAStart = false;
+  bool HasMustTailInVarArgFunc = false;
+  StringValue SavePoint;
+  StringValue RestorePoint;
+
+  bool operator==(const MachineFrameInfo &Other) const {
+    return IsFrameAddressTaken == Other.IsFrameAddressTaken &&
+           IsReturnAddressTaken == Other.IsReturnAddressTaken &&
+           HasStackMap == Other.HasStackMap &&
+           HasPatchPoint == Other.HasPatchPoint &&
+           StackSize == Other.StackSize &&
+           OffsetAdjustment == Other.OffsetAdjustment &&
+           MaxAlignment == Other.MaxAlignment &&
+           AdjustsStack == Other.AdjustsStack && HasCalls == Other.HasCalls &&
+           StackProtector == Other.StackProtector &&
+           MaxCallFrameSize == Other.MaxCallFrameSize &&
+           HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
+           HasVAStart == Other.HasVAStart &&
+           HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
+           SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint;
+  }
+};
+
+template <> struct MappingTraits<MachineFrameInfo> {
+  static void mapping(IO &YamlIO, MachineFrameInfo &MFI) {
+    YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken, false);
+    YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken, false);
+    YamlIO.mapOptional("hasStackMap", MFI.HasStackMap, false);
+    YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint, false);
+    YamlIO.mapOptional("stackSize", MFI.StackSize, (uint64_t)0);
+    YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment, (int)0);
+    YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment, (unsigned)0);
+    YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack, false);
+    YamlIO.mapOptional("hasCalls", MFI.HasCalls, false);
+    YamlIO.mapOptional("stackProtector", MFI.StackProtector,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, (unsigned)~0);
+    YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment,
+                       false);
+    YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
+    YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc,
+                       false);
+    YamlIO.mapOptional("savePoint", MFI.SavePoint,
+                       StringValue()); // Don't print it out when it's empty.
+    YamlIO.mapOptional("restorePoint", MFI.RestorePoint,
+                       StringValue()); // Don't print it out when it's empty.
+  }
+};
+
+struct MachineFunction {
+  StringRef Name;
+  unsigned Alignment = 0;
+  bool ExposesReturnsTwice = false;
+  // GISel MachineFunctionProperties.
+  bool Legalized = false;
+  bool RegBankSelected = false;
+  bool Selected = false;
+  bool FailedISel = false;
+  // Register information
+  bool TracksRegLiveness = false;
+  std::vector<VirtualRegisterDefinition> VirtualRegisters;
+  std::vector<MachineFunctionLiveIn> LiveIns;
+  Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
+  // TODO: Serialize the various register masks.
+  // Frame information
+  MachineFrameInfo FrameInfo;
+  std::vector<FixedMachineStackObject> FixedStackObjects;
+  std::vector<MachineStackObject> StackObjects;
+  std::vector<MachineConstantPoolValue> Constants; /// Constant pool.
+  MachineJumpTable JumpTableInfo;
+  BlockStringValue Body;
+};
+
+template <> struct MappingTraits<MachineFunction> {
+  static void mapping(IO &YamlIO, MachineFunction &MF) {
+    YamlIO.mapRequired("name", MF.Name);
+    YamlIO.mapOptional("alignment", MF.Alignment, (unsigned)0);
+    YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice, false);
+    YamlIO.mapOptional("legalized", MF.Legalized, false);
+    YamlIO.mapOptional("regBankSelected", MF.RegBankSelected, false);
+    YamlIO.mapOptional("selected", MF.Selected, false);
+    YamlIO.mapOptional("failedISel", MF.FailedISel, false);
+    YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
+    YamlIO.mapOptional("registers", MF.VirtualRegisters,
+                       std::vector<VirtualRegisterDefinition>());
+    YamlIO.mapOptional("liveins", MF.LiveIns,
+                       std::vector<MachineFunctionLiveIn>());
+    YamlIO.mapOptional("calleeSavedRegisters", MF.CalleeSavedRegisters,
+                       Optional<std::vector<FlowStringValue>>());
+    YamlIO.mapOptional("frameInfo", MF.FrameInfo, MachineFrameInfo());
+    YamlIO.mapOptional("fixedStack", MF.FixedStackObjects,
+                       std::vector<FixedMachineStackObject>());
+    YamlIO.mapOptional("stack", MF.StackObjects,
+                       std::vector<MachineStackObject>());
+    YamlIO.mapOptional("constants", MF.Constants,
+                       std::vector<MachineConstantPoolValue>());
+    if (!YamlIO.outputting() || !MF.JumpTableInfo.Entries.empty())
+      YamlIO.mapOptional("jumpTable", MF.JumpTableInfo, MachineJumpTable());
+    YamlIO.mapOptional("body", MF.Body, BlockStringValue());
+  }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MIRYAMLMAPPING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
new file mode 100644
index 0000000..8c9b7a8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachORelocation.h
@@ -0,0 +1,56 @@
+//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachORelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHORELOCATION_H
+#define LLVM_CODEGEN_MACHORELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+  /// MachORelocation - This class contains information about each relocation
+  /// that needs to be emitted to the file.
+  /// See <mach-o/reloc.h>.
+  class MachORelocation {
+    uint32_t r_address;   // offset in the section to what is being relocated
+    uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
+    bool     r_pcrel;     // was relocated pc-relative already
+    uint8_t  r_length;    // length = 2 ^ r_length
+    bool     r_extern;    // 1 = r_symbolnum is a symbol index, 0 = section ordinal
+    uint8_t  r_type;      // if not 0, machine-specific relocation type
+    bool     r_scattered; // 1 = scattered, 0 = non-scattered
+    int32_t  r_value;     // the value that the item being relocated refers to
+  public:
+    uint32_t getPackedFields() const {
+      if (r_scattered)
+        return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) | 
+          ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
+      else
+        return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
+          (r_extern << 4) | (r_type & 15);
+    }
+    uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
+    uint32_t getRawAddress() const { return r_address; }
+
+    MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
+                    bool ext, uint8_t type, bool scattered = false, 
+                    int32_t value = 0) : 
+      r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
+      r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
+  };
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_MACHORELOCATION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
new file mode 100644
index 0000000..f3130b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBasicBlock.h
@@ -0,0 +1,918 @@
+//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect the sequence of machine instructions for a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
+#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundleIterator.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class MachineFunction;
+class MCSymbol;
+class ModuleSlotTracker;
+class Pass;
+class SlotIndexes;
+class StringRef;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+template <> struct ilist_traits<MachineInstr> {
+private:
+  friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
+
+  MachineBasicBlock *Parent;
+
+  using instr_iterator =
+      simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
+
+public:
+  void addNodeToList(MachineInstr *N);
+  void removeNodeFromList(MachineInstr *N);
+  void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
+                             instr_iterator Last);
+  void deleteNode(MachineInstr *MI);
+};
+
+class MachineBasicBlock
+    : public ilist_node_with_parent<MachineBasicBlock, MachineFunction> {
+public:
+  /// Pair of physical register and lane mask.
+  /// This is not simply a std::pair typedef because the members should be named
+  /// clearly as they both have an integer type.
+  struct RegisterMaskPair {
+  public:
+    MCPhysReg PhysReg;
+    LaneBitmask LaneMask;
+
+    RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
+        : PhysReg(PhysReg), LaneMask(LaneMask) {}
+  };
+
+private:
+  using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
+
+  Instructions Insts;
+  const BasicBlock *BB;
+  int Number;
+  MachineFunction *xParent;
+
+  /// Keep track of the predecessor / successor basic blocks.
+  std::vector<MachineBasicBlock *> Predecessors;
+  std::vector<MachineBasicBlock *> Successors;
+
+  /// Keep track of the probabilities to the successors. This vector has the
+  /// same order as Successors, or it is empty if we don't use it (disable
+  /// optimization).
+  std::vector<BranchProbability> Probs;
+  using probability_iterator = std::vector<BranchProbability>::iterator;
+  using const_probability_iterator =
+      std::vector<BranchProbability>::const_iterator;
+
+  Optional<uint64_t> IrrLoopHeaderWeight;
+
+  /// Keep track of the physical registers that are live-in to the basic block.
+  using LiveInVector = std::vector<RegisterMaskPair>;
+  LiveInVector LiveIns;
+
+  /// Alignment of the basic block. Zero if the basic block does not need to be
+  /// aligned. The alignment is specified as log2(bytes).
+  unsigned Alignment = 0;
+
+  /// Indicate that this basic block is entered via an exception handler.
+  bool IsEHPad = false;
+
+  /// Indicate that this basic block is potentially the target of an indirect
+  /// branch.
+  bool AddressTaken = false;
+
+  /// Indicate that this basic block is the entry block of an EH funclet.
+  bool IsEHFuncletEntry = false;
+
+  /// Indicate that this basic block is the entry block of a cleanup funclet.
+  bool IsCleanupFuncletEntry = false;
+
+  /// \brief Since getSymbol is a relatively heavy-weight operation, the symbol
+  /// is only computed once and is cached.
+  mutable MCSymbol *CachedMCSymbol = nullptr;
+
+  // Intrusive list support
+  MachineBasicBlock() = default;
+
+  explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
+
+  ~MachineBasicBlock();
+
+  // MachineBasicBlocks are allocated and owned by MachineFunction.
+  friend class MachineFunction;
+
+public:
+  /// Return the LLVM basic block that this instance corresponded to originally.
+  /// Note that this may be NULL if this instance does not correspond directly
+  /// to an LLVM basic block.
+  const BasicBlock *getBasicBlock() const { return BB; }
+
+  /// Return the name of the corresponding LLVM basic block, or an empty string.
+  StringRef getName() const;
+
+  /// Return a formatted string to identify this block and its parent function.
+  std::string getFullName() const;
+
+  /// Test whether this block is potentially the target of an indirect branch.
+  bool hasAddressTaken() const { return AddressTaken; }
+
+  /// Set this block to reflect that it potentially is the target of an indirect
+  /// branch.
+  void setHasAddressTaken() { AddressTaken = true; }
+
+  /// Return the MachineFunction containing this basic block.
+  const MachineFunction *getParent() const { return xParent; }
+  MachineFunction *getParent() { return xParent; }
+
+  using instr_iterator = Instructions::iterator;
+  using const_instr_iterator = Instructions::const_iterator;
+  using reverse_instr_iterator = Instructions::reverse_iterator;
+  using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
+
+  using iterator = MachineInstrBundleIterator<MachineInstr>;
+  using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
+  using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
+  using const_reverse_iterator =
+      MachineInstrBundleIterator<const MachineInstr, true>;
+
+  unsigned size() const { return (unsigned)Insts.size(); }
+  bool empty() const { return Insts.empty(); }
+
+  MachineInstr       &instr_front()       { return Insts.front(); }
+  MachineInstr       &instr_back()        { return Insts.back();  }
+  const MachineInstr &instr_front() const { return Insts.front(); }
+  const MachineInstr &instr_back()  const { return Insts.back();  }
+
+  MachineInstr       &front()             { return Insts.front(); }
+  MachineInstr       &back()              { return *--end();      }
+  const MachineInstr &front()       const { return Insts.front(); }
+  const MachineInstr &back()        const { return *--end();      }
+
+  instr_iterator                instr_begin()       { return Insts.begin();  }
+  const_instr_iterator          instr_begin() const { return Insts.begin();  }
+  instr_iterator                  instr_end()       { return Insts.end();    }
+  const_instr_iterator            instr_end() const { return Insts.end();    }
+  reverse_instr_iterator       instr_rbegin()       { return Insts.rbegin(); }
+  const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
+  reverse_instr_iterator       instr_rend  ()       { return Insts.rend();   }
+  const_reverse_instr_iterator instr_rend  () const { return Insts.rend();   }
+
+  using instr_range = iterator_range<instr_iterator>;
+  using const_instr_range = iterator_range<const_instr_iterator>;
+  instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
+  const_instr_range instrs() const {
+    return const_instr_range(instr_begin(), instr_end());
+  }
+
+  iterator                begin()       { return instr_begin();  }
+  const_iterator          begin() const { return instr_begin();  }
+  iterator                end  ()       { return instr_end();    }
+  const_iterator          end  () const { return instr_end();    }
+  reverse_iterator rbegin() {
+    return reverse_iterator::getAtBundleBegin(instr_rbegin());
+  }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator::getAtBundleBegin(instr_rbegin());
+  }
+  reverse_iterator rend() { return reverse_iterator(instr_rend()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(instr_rend());
+  }
+
+  /// Support for MachineInstr::getNextNode().
+  static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
+    return &MachineBasicBlock::Insts;
+  }
+
+  inline iterator_range<iterator> terminators() {
+    return make_range(getFirstTerminator(), end());
+  }
+  inline iterator_range<const_iterator> terminators() const {
+    return make_range(getFirstTerminator(), end());
+  }
+
+  /// Returns a range that iterates over the phis in the basic block.
+  inline iterator_range<iterator> phis() {
+    return make_range(begin(), getFirstNonPHI());
+  }
+  inline iterator_range<const_iterator> phis() const {
+    return const_cast<MachineBasicBlock *>(this)->phis();
+  }
+
+  // Machine-CFG iterators
+  using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
+  using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+  using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
+  using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
+  using pred_reverse_iterator =
+      std::vector<MachineBasicBlock *>::reverse_iterator;
+  using const_pred_reverse_iterator =
+      std::vector<MachineBasicBlock *>::const_reverse_iterator;
+  using succ_reverse_iterator =
+      std::vector<MachineBasicBlock *>::reverse_iterator;
+  using const_succ_reverse_iterator =
+      std::vector<MachineBasicBlock *>::const_reverse_iterator;
+  pred_iterator        pred_begin()       { return Predecessors.begin(); }
+  const_pred_iterator  pred_begin() const { return Predecessors.begin(); }
+  pred_iterator        pred_end()         { return Predecessors.end();   }
+  const_pred_iterator  pred_end()   const { return Predecessors.end();   }
+  pred_reverse_iterator        pred_rbegin()
+                                          { return Predecessors.rbegin();}
+  const_pred_reverse_iterator  pred_rbegin() const
+                                          { return Predecessors.rbegin();}
+  pred_reverse_iterator        pred_rend()
+                                          { return Predecessors.rend();  }
+  const_pred_reverse_iterator  pred_rend()   const
+                                          { return Predecessors.rend();  }
+  unsigned             pred_size()  const {
+    return (unsigned)Predecessors.size();
+  }
+  bool                 pred_empty() const { return Predecessors.empty(); }
+  succ_iterator        succ_begin()       { return Successors.begin();   }
+  const_succ_iterator  succ_begin() const { return Successors.begin();   }
+  succ_iterator        succ_end()         { return Successors.end();     }
+  const_succ_iterator  succ_end()   const { return Successors.end();     }
+  succ_reverse_iterator        succ_rbegin()
+                                          { return Successors.rbegin();  }
+  const_succ_reverse_iterator  succ_rbegin() const
+                                          { return Successors.rbegin();  }
+  succ_reverse_iterator        succ_rend()
+                                          { return Successors.rend();    }
+  const_succ_reverse_iterator  succ_rend()   const
+                                          { return Successors.rend();    }
+  unsigned             succ_size()  const {
+    return (unsigned)Successors.size();
+  }
+  bool                 succ_empty() const { return Successors.empty();   }
+
+  inline iterator_range<pred_iterator> predecessors() {
+    return make_range(pred_begin(), pred_end());
+  }
+  inline iterator_range<const_pred_iterator> predecessors() const {
+    return make_range(pred_begin(), pred_end());
+  }
+  inline iterator_range<succ_iterator> successors() {
+    return make_range(succ_begin(), succ_end());
+  }
+  inline iterator_range<const_succ_iterator> successors() const {
+    return make_range(succ_begin(), succ_end());
+  }
+
+  // LiveIn management methods.
+
+  /// Adds the specified register as a live in. Note that it is an error to add
+  /// the same register to the same set more than once unless the intention is
+  /// to call sortUniqueLiveIns after all registers are added.
+  void addLiveIn(MCPhysReg PhysReg,
+                 LaneBitmask LaneMask = LaneBitmask::getAll()) {
+    LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
+  }
+  void addLiveIn(const RegisterMaskPair &RegMaskPair) {
+    LiveIns.push_back(RegMaskPair);
+  }
+
+  /// Sorts and uniques the LiveIns vector. It can be significantly faster to do
+  /// this than repeatedly calling isLiveIn before calling addLiveIn for every
+  /// LiveIn insertion.
+  void sortUniqueLiveIns();
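+
+  // Illustrative usage (a sketch): when adding many live-ins it is cheaper to
+  // add them unconditionally and deduplicate once at the end:
+  //   for (MCPhysReg Reg : CalleeSavedRegs)
+  //     MBB.addLiveIn(Reg);
+  //   MBB.sortUniqueLiveIns(); // sorts by register, merges duplicate entries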
+
+  /// Clear live in list.
+  void clearLiveIns();
+
+  /// Add PhysReg as live in to this block, and ensure that there is a copy of
+  /// PhysReg to a virtual register of class RC. Return the virtual register
+  /// that is a copy of the live in PhysReg.
+  unsigned addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC);
+
+  /// Remove the specified register from the live in set.
+  void removeLiveIn(MCPhysReg Reg,
+                    LaneBitmask LaneMask = LaneBitmask::getAll());
+
+  /// Return true if the specified register is in the live in set.
+  bool isLiveIn(MCPhysReg Reg,
+                LaneBitmask LaneMask = LaneBitmask::getAll()) const;
+
+  // Iteration support for live in sets.  These sets are kept in sorted
+  // order by their register number.
+  using livein_iterator = LiveInVector::const_iterator;
+#ifndef NDEBUG
+  /// Unlike livein_begin, this method does not check that the liveness
+  /// information is accurate. Still, for debugging purposes it may be useful
+  /// to have iterators that won't assert if the liveness information
+  /// is not current.
+  livein_iterator livein_begin_dbg() const { return LiveIns.begin(); }
+  iterator_range<livein_iterator> liveins_dbg() const {
+    return make_range(livein_begin_dbg(), livein_end());
+  }
+#endif
+  livein_iterator livein_begin() const;
+  livein_iterator livein_end()   const { return LiveIns.end(); }
+  bool            livein_empty() const { return LiveIns.empty(); }
+  iterator_range<livein_iterator> liveins() const {
+    return make_range(livein_begin(), livein_end());
+  }
+
+  /// Remove entry from the livein set and return iterator to the next.
+  livein_iterator removeLiveIn(livein_iterator I);
+
+  /// Get the clobber mask for the start of this basic block. Funclets use this
+  /// to prevent register allocation across funclet transitions.
+  const uint32_t *getBeginClobberMask(const TargetRegisterInfo *TRI) const;
+
+  /// Get the clobber mask for the end of the basic block.
+  /// \see getBeginClobberMask()
+  const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
+
+  /// Return alignment of the basic block. The alignment is specified as
+  /// log2(bytes).
+  unsigned getAlignment() const { return Alignment; }
+
+  /// Set alignment of the basic block. The alignment is specified as
+  /// log2(bytes).
+  void setAlignment(unsigned Align) { Alignment = Align; }
+
+  /// Returns true if the block is a landing pad; that is, this basic block is
+  /// entered via an exception handler.
+  bool isEHPad() const { return IsEHPad; }
+
+  /// Indicates the block is a landing pad; that is, this basic block is
+  /// entered via an exception handler.
+  void setIsEHPad(bool V = true) { IsEHPad = V; }
+
+  bool hasEHPadSuccessor() const;
+
+  /// Returns true if this is the entry block of an EH funclet.
+  bool isEHFuncletEntry() const { return IsEHFuncletEntry; }
+
+  /// Indicates if this is the entry block of an EH funclet.
+  void setIsEHFuncletEntry(bool V = true) { IsEHFuncletEntry = V; }
+
+  /// Returns true if this is the entry block of a cleanup funclet.
+  bool isCleanupFuncletEntry() const { return IsCleanupFuncletEntry; }
+
+  /// Indicates if this is the entry block of a cleanup funclet.
+  void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }
+
+  /// Returns true if it is legal to hoist instructions into this block.
+  bool isLegalToHoistInto() const;
+
+  // Code Layout methods.
+
+  /// Move 'this' block before or after the specified block.  This only moves
+  /// the block, it does not modify the CFG or adjust potential fall-throughs at
+  /// the end of the block.
+  void moveBefore(MachineBasicBlock *NewAfter);
+  void moveAfter(MachineBasicBlock *NewBefore);
+
+  /// Update the terminator instructions in block to account for changes to the
+  /// layout. If the block previously used a fallthrough, it may now need a
+  /// branch, and if it previously used branching it may now be able to use a
+  /// fallthrough.
+  void updateTerminator();
+
+  // Machine-CFG mutators
+
+  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
+  /// of Succ is automatically updated. The PROB parameter is stored in the
+  /// Probabilities list. The default probability is set as unknown. Mixing
+  /// known and unknown probabilities in successor list is not allowed. When all
+  /// successors have unknown probabilities, 1 / N is returned as the
+  /// probability for each successor, where N is the number of successors.
+  ///
+  /// Note that duplicate Machine CFG edges are not allowed.
+  void addSuccessor(MachineBasicBlock *Succ,
+                    BranchProbability Prob = BranchProbability::getUnknown());
+
+  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
+  /// of Succ is automatically updated. The probability is not provided because
+  /// BPI is not available (e.g. -O0 is used), in which case edge probabilities
+  /// won't be used. Using this interface can save some space.
+  void addSuccessorWithoutProb(MachineBasicBlock *Succ);
+
+  /// Set successor probability of a given iterator.
+  void setSuccProbability(succ_iterator I, BranchProbability Prob);
+
+  /// Normalize probabilities of all successors so that the sum of them becomes
+  /// one. This is usually done when the current update on this MBB is done, and
+  /// the sum of its successors' probabilities is not guaranteed to be one. The
+  /// user is responsible for the correct use of this function.
+  /// MBB::removeSuccessor() has an option to do this automatically.
+  void normalizeSuccProbs() {
+    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
+  }
+
+  /// Validate successors' probabilities and check if the sum of them is
+  /// approximate one. This only works in DEBUG mode.
+  void validateSuccProbs() const;
+
+  /// Remove successor from the successors list of this MachineBasicBlock. The
+  /// Predecessors list of Succ is automatically updated.
+  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+  /// after the successor is removed.
+  void removeSuccessor(MachineBasicBlock *Succ,
+                       bool NormalizeSuccProbs = false);
+
+  /// Remove specified successor from the successors list of this
+  /// MachineBasicBlock. The Predecessors list of Succ is automatically updated.
+  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+  /// after the successor is removed.
+  /// Return the iterator to the element after the one removed.
+  succ_iterator removeSuccessor(succ_iterator I,
+                                bool NormalizeSuccProbs = false);
+
+  /// Replace successor OLD with NEW and update probability info.
+  void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// Copy a successor (and any probability info) from the original block to
+  /// this block. Uses an iterator into the original block's successors.
+  ///
+  /// This is useful when doing a partial clone of successors. Afterward, the
+  /// probabilities may need to be normalized.
+  void copySuccessor(MachineBasicBlock *Orig, succ_iterator I);
+
+  /// Transfers all the successors from MBB to this machine basic block (i.e.,
+  /// copies all the successors of FromMBB and removes all the successors from
+  /// FromMBB).
+  void transferSuccessors(MachineBasicBlock *FromMBB);
+
+  /// Transfers all the successors, as in transferSuccessors, and updates PHI
+  /// operands in the successor blocks that refer to FromMBB to refer to this
+  /// block.
+  void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB);
+
+  /// Return true if any of the successors have probabilities attached to them.
+  bool hasSuccessorProbabilities() const { return !Probs.empty(); }
+
+  /// Return true if the specified MBB is a predecessor of this block.
+  bool isPredecessor(const MachineBasicBlock *MBB) const;
+
+  /// Return true if the specified MBB is a successor of this block.
+  bool isSuccessor(const MachineBasicBlock *MBB) const;
+
+  /// Return true if the specified MBB will be emitted immediately after this
+  /// block, such that if this block exits by falling through, control will
+  /// transfer to the specified MBB. Note that MBB need not be a successor at
+  /// all, for example if this block ends with an unconditional branch to some
+  /// other block.
+  bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
+
+  /// Return the fallthrough block if the block can implicitly
+  /// transfer control to the block after it by falling off the end of
+  /// it. This should return null if it can reach the block after
+  /// it, but it uses an explicit branch to do so (e.g., a table
+  /// jump). A non-null return is a conservative answer.
+  MachineBasicBlock *getFallThrough();
+
+  /// Return true if the block can implicitly transfer control to the
+  /// block after it by falling off the end of it.  This should return
+  /// false if it can reach the block after it, but it uses an
+  /// explicit branch to do so (e.g., a table jump).  True is a
+  /// conservative answer.
+  bool canFallThrough();
+
+  /// Returns a pointer to the first instruction in this block that is not a
+  /// PHINode instruction. When adding instructions to the beginning of the
+  /// basic block, they should be added before the returned value, not before
+  /// the first instruction, which might be PHI.
+  /// Returns end() if there's no non-PHI instruction.
+  iterator getFirstNonPHI();
+
+  /// Return the first instruction in MBB after I that is not a PHI or a label.
+  /// This is the correct point to insert lowered copies at the beginning of a
+  /// basic block that must be before any debugging information.
+  iterator SkipPHIsAndLabels(iterator I);
+
+  /// Return the first instruction in MBB after I that is not a PHI, label or
+  /// debug.  This is the correct point to insert copies at the beginning of a
+  /// basic block.
+  iterator SkipPHIsLabelsAndDebug(iterator I);
+
+  /// Returns an iterator to the first terminator instruction of this basic
+  /// block. If a terminator does not exist, it returns end().
+  iterator getFirstTerminator();
+  const_iterator getFirstTerminator() const {
+    return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
+  }
+
+  /// Same as getFirstTerminator but it ignores bundles and returns an
+  /// instr_iterator instead.
+  instr_iterator getFirstInstrTerminator();
+
+  /// Returns an iterator to the first non-debug instruction in the basic block,
+  /// or end().
+  iterator getFirstNonDebugInstr();
+  const_iterator getFirstNonDebugInstr() const {
+    return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr();
+  }
+
+  /// Returns an iterator to the last non-debug instruction in the basic block,
+  /// or end().
+  iterator getLastNonDebugInstr();
+  const_iterator getLastNonDebugInstr() const {
+    return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr();
+  }
+
+  /// Convenience function that returns true if the block ends in a return
+  /// instruction.
+  bool isReturnBlock() const {
+    return !empty() && back().isReturn();
+  }
+
+  /// Split the critical edge from this block to the given successor block, and
+  /// return the newly created block, or null if splitting is not possible.
+  ///
+  /// This function updates LiveVariables, MachineDominatorTree, and
+  /// MachineLoopInfo, as applicable.
+  MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass &P);
+
+  /// Check if the edge between this block and the given successor \p Succ
+  /// can be split. If this returns true, a subsequent call to
+  /// SplitCriticalEdge is guaranteed to return a valid basic block if
+  /// no changes occurred in the meantime.
+  bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const;
+
+  void pop_front() { Insts.pop_front(); }
+  void pop_back() { Insts.pop_back(); }
+  void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+
+  /// Insert MI into the instruction list before I, possibly inside a bundle.
+  ///
+  /// If the insertion point is inside a bundle, MI will be added to the bundle,
+  /// otherwise MI will not be added to any bundle. That means this function
+  /// alone can't be used to prepend or append instructions to bundles. See
+  /// MIBundleBuilder::insert() for a more reliable way of doing that.
+  instr_iterator insert(instr_iterator I, MachineInstr *M);
+
+  /// Insert a range of instructions into the instruction list before I.
+  template<typename IT>
+  void insert(iterator I, IT S, IT E) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    Insts.insert(I.getInstrIterator(), S, E);
+  }
+
+  /// Insert MI into the instruction list before I.
+  iterator insert(iterator I, MachineInstr *MI) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+           "Cannot insert instruction with bundle flags");
+    return Insts.insert(I.getInstrIterator(), MI);
+  }
+
+  /// Insert MI into the instruction list after I.
+  iterator insertAfter(iterator I, MachineInstr *MI) {
+    assert((I == end() || I->getParent() == this) &&
+           "iterator points outside of basic block");
+    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+           "Cannot insert instruction with bundle flags");
+    return Insts.insertAfter(I.getInstrIterator(), MI);
+  }
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  instr_iterator erase(instr_iterator I);
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  instr_iterator erase_instr(MachineInstr *I) {
+    return erase(instr_iterator(I));
+  }
+
+  /// Remove a range of instructions from the instruction list and delete them.
+  iterator erase(iterator I, iterator E) {
+    return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
+  }
+
+  /// Remove an instruction or bundle from the instruction list and delete it.
+  ///
+  /// If I points to a bundle of instructions, they are all erased.
+  iterator erase(iterator I) {
+    return erase(I, std::next(I));
+  }
+
+  /// Remove an instruction from the instruction list and delete it.
+  ///
+  /// If I is the head of a bundle of instructions, the whole bundle will be
+  /// erased.
+  iterator erase(MachineInstr *I) {
+    return erase(iterator(I));
+  }
+
+  /// Remove the unbundled instruction from the instruction list without
+  /// deleting it.
+  ///
+  /// This function can not be used to remove bundled instructions, use
+  /// remove_instr to remove individual instructions from a bundle.
+  MachineInstr *remove(MachineInstr *I) {
+    assert(!I->isBundled() && "Cannot remove bundled instructions");
+    return Insts.remove(instr_iterator(I));
+  }
+
+  /// Remove the possibly bundled instruction from the instruction list
+  /// without deleting it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle will still be bundled after removing the single instruction.
+  MachineInstr *remove_instr(MachineInstr *I);
+
+  void clear() {
+    Insts.clear();
+  }
+
+  /// Take an instruction from MBB 'Other' at the position From, and insert it
+  /// into this MBB right before 'Where'.
+  ///
+  /// If From points to a bundle of instructions, the whole bundle is moved.
+  void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
+    // The range splice() doesn't allow noop moves, but this one does.
+    if (Where != From)
+      splice(Where, Other, From, std::next(From));
+  }
+
+  /// Take a block of instructions from MBB 'Other' in the range [From, To),
+  /// and insert them into this MBB right before 'Where'.
+  ///
+  /// The instruction at 'Where' must not be included in the range of
+  /// instructions to move.
+  void splice(iterator Where, MachineBasicBlock *Other,
+              iterator From, iterator To) {
+    Insts.splice(Where.getInstrIterator(), Other->Insts,
+                 From.getInstrIterator(), To.getInstrIterator());
+  }
+
+  /// This method unlinks 'this' from the containing function, and returns it,
+  /// but does not delete it.
+  MachineBasicBlock *removeFromParent();
+
+  /// This method unlinks 'this' from the containing function and deletes it.
+  void eraseFromParent();
+
+  /// Given a machine basic block that branched to 'Old', change the code and
+  /// CFG so that it branches to 'New' instead.
+  void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// Various pieces of code can cause excess edges in the CFG to be inserted.
+  /// If we have proven that MBB can only branch to DestA and DestB, remove any
+  /// other MBB successors from the CFG. DestA and DestB can be null. Besides
+  /// DestA and DestB, retain other edges leading to LandingPads (currently
+  /// there can be only one; we don't check or require that here). Note it is
+  /// possible that DestA and/or DestB are LandingPads.
+  bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
+                            MachineBasicBlock *DestB,
+                            bool IsCond);
+
+  /// Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE
+  /// instructions.  Return UnknownLoc if there is none.
+  DebugLoc findDebugLoc(instr_iterator MBBI);
+  DebugLoc findDebugLoc(iterator MBBI) {
+    return findDebugLoc(MBBI.getInstrIterator());
+  }
+
+  /// Find the previous valid DebugLoc preceding MBBI, skipping any DBG_VALUE
+  /// instructions.  Return UnknownLoc if there is none.
+  DebugLoc findPrevDebugLoc(instr_iterator MBBI);
+  DebugLoc findPrevDebugLoc(iterator MBBI) {
+    return findPrevDebugLoc(MBBI.getInstrIterator());
+  }
+
+  /// Find and return the merged DebugLoc of the branch instructions of the
+  /// block. Return UnknownLoc if there is none.
+  DebugLoc findBranchDebugLoc();
+
+  /// Possible outcome of a register liveness query to computeRegisterLiveness()
+  enum LivenessQueryResult {
+    LQR_Live,   ///< Register is known to be (at least partially) live.
+    LQR_Dead,   ///< Register is known to be fully dead.
+    LQR_Unknown ///< Register liveness not decidable from local neighborhood.
+  };
+
+  /// Return whether (physical) register \p Reg has been defined and not
+  /// killed as of just before \p Before.
+  ///
+  /// Search is localized to a neighborhood of \p Neighborhood instructions
+  /// before (searching for defs or kills) and \p Neighborhood instructions
+  /// after (searching just for defs) \p Before.
+  ///
+  /// \p Reg must be a physical register.
+  LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
+                                              unsigned Reg,
+                                              const_iterator Before,
+                                              unsigned Neighborhood = 10) const;
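+
+  // Illustrative usage (a sketch; the register name is target-specific and
+  // purely hypothetical):
+  //   if (MBB.computeRegisterLiveness(TRI, X86::EAX, InsertPt) ==
+  //       MachineBasicBlock::LQR_Dead) {
+  //     // EAX is known dead here; safe to use as a scratch register.
+  //   }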
+
+  // Debugging methods.
+  void dump() const;
+  void print(raw_ostream &OS, const SlotIndexes * = nullptr,
+             bool IsStandalone = true) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             const SlotIndexes * = nullptr, bool IsStandalone = true) const;
+
+  // Printing method used by LoopInfo.
+  void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
+
+  /// MachineBasicBlocks are uniquely numbered at the function level, unless
+  /// they're not in a MachineFunction yet, in which case this will return -1.
+  int getNumber() const { return Number; }
+  void setNumber(int N) { Number = N; }
+
+  /// Return the MCSymbol for this basic block.
+  MCSymbol *getSymbol() const;
+
+  Optional<uint64_t> getIrrLoopHeaderWeight() const {
+    return IrrLoopHeaderWeight;
+  }
+
+  void setIrrLoopHeaderWeight(uint64_t Weight) {
+    IrrLoopHeaderWeight = Weight;
+  }
+
+private:
+  /// Return probability iterator corresponding to the I successor iterator.
+  probability_iterator getProbabilityIterator(succ_iterator I);
+  const_probability_iterator
+  getProbabilityIterator(const_succ_iterator I) const;
+
+  friend class MachineBranchProbabilityInfo;
+  friend class MIPrinter;
+
+  /// Return probability of the edge from this block to MBB. This method should
+  /// NOT be called directly, but by using getEdgeProbability method from
+  /// MachineBranchProbabilityInfo class.
+  BranchProbability getSuccProbability(const_succ_iterator Succ) const;
+
+  // Methods used to maintain doubly linked list of blocks...
+  friend struct ilist_callback_traits<MachineBasicBlock>;
+
+  // Machine-CFG mutators
+
+  /// Add Pred as a predecessor of this MachineBasicBlock. Don't do this
+  /// unless you know what you're doing, because it doesn't update Pred's
+  /// successors list. Use Pred->addSuccessor instead.
+  void addPredecessor(MachineBasicBlock *Pred);
+
+  /// Remove Pred as a predecessor of this MachineBasicBlock. Don't do this
+  /// unless you know what you're doing, because it doesn't update Pred's
+  /// successors list. Use Pred->removeSuccessor instead.
+  void removePredecessor(MachineBasicBlock *Pred);
+};
+
+raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
+
+/// Prints a machine basic block reference.
+///
+/// The format is:
+///   %bb.5           - a machine basic block with MBB.getNumber() == 5.
+///
+/// Usage: OS << printMBBReference(MBB) << '\n';
+Printable printMBBReference(const MachineBasicBlock &MBB);
+
+// This is useful when building IndexedMaps keyed on basic block pointers.
+struct MBB2NumberFunctor {
+  using argument_type = const MachineBasicBlock *;
+  unsigned operator()(const MachineBasicBlock *MBB) const {
+    return MBB->getNumber();
+  }
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for machine basic block graphs (machine-CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks.
+//
+
+template <> struct GraphTraits<MachineBasicBlock *> {
+  using NodeRef = MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::succ_iterator;
+
+  static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+};
+
+template <> struct GraphTraits<const MachineBasicBlock *> {
+  using NodeRef = const MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
+
+  static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
+};
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks and to walk it
+// in inverse order.  Inverse order for a function is considered
+// to be when traversing the predecessor edges of a MBB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
+  using NodeRef = MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
+    return G.Graph;
+  }
+
+  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+};
+
+template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
+  using NodeRef = const MachineBasicBlock *;
+  using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
+
+  static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
+    return G.Graph;
+  }
+
+  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
+};
+
+/// MachineInstrSpan provides an interface to get an iteration range
+/// containing the instruction it was initialized with, along with all
+/// those instructions inserted prior to or following that instruction
+/// at some point after the MachineInstrSpan is constructed.
+class MachineInstrSpan {
+  MachineBasicBlock &MBB;
+  MachineBasicBlock::iterator I, B, E;
+
+public:
+  MachineInstrSpan(MachineBasicBlock::iterator I)
+    : MBB(*I->getParent()),
+      I(I),
+      B(I == MBB.begin() ? MBB.end() : std::prev(I)),
+      E(std::next(I)) {}
+
+  MachineBasicBlock::iterator begin() {
+    return B == MBB.end() ? MBB.begin() : std::next(B);
+  }
+  MachineBasicBlock::iterator end() { return E; }
+  bool empty() { return begin() == end(); }
+
+  MachineBasicBlock::iterator getInitial() { return I; }
+};
+
+/// Increment \p It until it points to a non-debug instruction or to \p End
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<typename IterT>
+inline IterT skipDebugInstructionsForward(IterT It, IterT End) {
+  while (It != End && It->isDebugValue())
+    It++;
+  return It;
+}
+
+/// Decrement \p It until it points to a non-debug instruction or to \p Begin
+/// and return the resulting iterator. This function should only be used with
+/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
+/// const_instr_iterator} and the respective reverse iterators.
+template<class IterT>
+inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
+  while (It != Begin && It->isDebugValue())
+    It--;
+  return It;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
new file mode 100644
index 0000000..5b4b99c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -0,0 +1,85 @@
+//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+template <class BlockT> class BlockFrequencyInfoImpl;
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineLoopInfo;
+class raw_ostream;
+
+/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
+/// to estimate machine basic block frequencies.
+class MachineBlockFrequencyInfo : public MachineFunctionPass {
+  using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
+  std::unique_ptr<ImplType> MBFI;
+
+public:
+  static char ID;
+
+  MachineBlockFrequencyInfo();
+  ~MachineBlockFrequencyInfo() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  /// calculate - compute block frequency info for the given function.
+  void calculate(const MachineFunction &F,
+                 const MachineBranchProbabilityInfo &MBPI,
+                 const MachineLoopInfo &MLI);
+
+  void releaseMemory() override;
+
+  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
+  /// information. Please note that the initial frequency is equal to 1024. It
+  /// means that we should not rely on the value itself, but only on the
+  /// comparison to the other block frequencies. We do this to avoid using
+  /// floating point.
+  ///
+  BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
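+
+  // Illustrative usage (a sketch): compare against the entry frequency rather
+  // than interpreting the raw value:
+  //   bool HotterThanEntry =
+  //       MBFI.getBlockFreq(MBB).getFrequency() > MBFI.getEntryFreq();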
+
+  Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
+  Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
+
+  bool isIrrLoopHeader(const MachineBasicBlock *MBB);
+
+  const MachineFunction *getFunction() const;
+  const MachineBranchProbabilityInfo *getMBPI() const;
+  void view(const Twine &Name, bool isSimple = true) const;
+
+  // Print the block frequency Freq to OS using the current function's entry
+  // frequency to convert Freq into a relative decimal form.
+  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
+
+  // Convenience method that attempts to look up the frequency associated with
+  // BB and print it to OS.
+  raw_ostream &printBlockFreq(raw_ostream &OS,
+                              const MachineBasicBlock *MBB) const;
+
+  uint64_t getEntryFreq() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
new file mode 100644
index 0000000..81b0524
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -0,0 +1,77 @@
+//=- MachineBranchProbabilityInfo.h - Branch Probability Analysis -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to evaluate branch probabilities on machine basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include <climits>
+#include <numeric>
+
+namespace llvm {
+
+class MachineBranchProbabilityInfo : public ImmutablePass {
+  virtual void anchor();
+
+  // Default weight value. Used when we don't have information about the edge.
+  // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+  // the successors have a weight yet. But it doesn't make sense when providing
+  // weight to an edge that may have siblings with non-zero weights. This can
+  // be handled various ways, but it's probably fine for an edge with unknown
+  // weight to just "inherit" the non-zero weight of an adjacent successor.
+  static const uint32_t DEFAULT_WEIGHT = 16;
+
+public:
+  static char ID;
+
+  MachineBranchProbabilityInfo() : ImmutablePass(ID) {
+    PassRegistry &Registry = *PassRegistry::getPassRegistry();
+    initializeMachineBranchProbabilityInfoPass(Registry);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  // Return edge probability.
+  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
+                                       const MachineBasicBlock *Dst) const;
+
+  // Same as above, but using a const_succ_iterator from Src. This is faster
+  // when the iterator is already available.
+  BranchProbability
+  getEdgeProbability(const MachineBasicBlock *Src,
+                     MachineBasicBlock::const_succ_iterator Dst) const;
+
+  // A 'hot' edge is an edge whose probability is >= 80%.
+  bool isEdgeHot(const MachineBasicBlock *Src,
+                 const MachineBasicBlock *Dst) const;
+
+  // Return a hot successor for the block BB or null if there isn't one.
+  // NB: This routine's complexity is linear in the number of successors.
+  MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;
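+
+  // Illustrative usage (a sketch):
+  //   BranchProbability P = MBPI.getEdgeProbability(Src, Dst);
+  //   if (MBPI.isEdgeHot(Src, Dst))
+  //     ...; // the Src -> Dst edge has probability >= 80%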
+
+  // Print a value between 0 (0% probability) and 1 (100% probability).
+  // The value is never equal to 0, and can be 1 only if the SRC block
+  // has a single successor.
+  raw_ostream &printEdgeProbability(raw_ostream &OS,
+                                    const MachineBasicBlock *Src,
+                                    const MachineBasicBlock *Dst) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h b/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h
new file mode 100644
index 0000000..586535f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -0,0 +1,87 @@
+//===- llvm/CodeGen/MachineCombinerPattern.h - Combiner patterns -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the instruction patterns supported by the machine combiner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+
+namespace llvm {
+
+/// These are instruction patterns matched by the machine combiner pass.
+enum class MachineCombinerPattern {
+  // These are commutative variants for reassociating a computation chain. See
+  // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
+  REASSOC_AX_BY,
+  REASSOC_AX_YB,
+  REASSOC_XA_BY,
+  REASSOC_XA_YB,
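+  // (Sketch: with Prev = A op X feeding Root = Prev op Y, op associative and
+  // commutative, the letter pairs encode which operand slots of Prev and Root
+  // hold the reassociable values; the combiner re-groups the chain to shorten
+  // its critical path.)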
+
+  // These are multiply-add patterns matched by the AArch64 machine combiner.
+  MULADDW_OP1,
+  MULADDW_OP2,
+  MULSUBW_OP1,
+  MULSUBW_OP2,
+  MULADDWI_OP1,
+  MULSUBWI_OP1,
+  MULADDX_OP1,
+  MULADDX_OP2,
+  MULSUBX_OP1,
+  MULSUBX_OP2,
+  MULADDXI_OP1,
+  MULSUBXI_OP1,
+  // Floating Point
+  FMULADDS_OP1,
+  FMULADDS_OP2,
+  FMULSUBS_OP1,
+  FMULSUBS_OP2,
+  FMULADDD_OP1,
+  FMULADDD_OP2,
+  FMULSUBD_OP1,
+  FMULSUBD_OP2,
+  FNMULSUBS_OP1,
+  FNMULSUBD_OP1,
+  FMLAv1i32_indexed_OP1,
+  FMLAv1i32_indexed_OP2,
+  FMLAv1i64_indexed_OP1,
+  FMLAv1i64_indexed_OP2,
+  FMLAv2f32_OP2,
+  FMLAv2f32_OP1,
+  FMLAv2f64_OP1,
+  FMLAv2f64_OP2,
+  FMLAv2i32_indexed_OP1,
+  FMLAv2i32_indexed_OP2,
+  FMLAv2i64_indexed_OP1,
+  FMLAv2i64_indexed_OP2,
+  FMLAv4f32_OP1,
+  FMLAv4f32_OP2,
+  FMLAv4i32_indexed_OP1,
+  FMLAv4i32_indexed_OP2,
+  FMLSv1i32_indexed_OP2,
+  FMLSv1i64_indexed_OP2,
+  FMLSv2f32_OP1,
+  FMLSv2f32_OP2,
+  FMLSv2f64_OP1,
+  FMLSv2f64_OP2,
+  FMLSv2i32_indexed_OP1,
+  FMLSv2i32_indexed_OP2,
+  FMLSv2i64_indexed_OP1,
+  FMLSv2i64_indexed_OP2,
+  FMLSv4f32_OP1,
+  FMLSv4f32_OP2,
+  FMLSv4i32_indexed_OP1,
+  FMLSv4i32_indexed_OP2
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
new file mode 100644
index 0000000..1705a0f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineConstantPool.h
@@ -0,0 +1,164 @@
+//===- CodeGen/MachineConstantPool.h - Abstract Constant Pool ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file declares the MachineConstantPool class which is an abstract
+/// constant pool to keep track of constants referenced by a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/MC/SectionKind.h"
+#include <climits>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class DataLayout;
+class FoldingSetNodeID;
+class MachineConstantPool;
+class raw_ostream;
+class Type;
+
+/// Abstract base class for all machine-specific constant pool value subclasses.
+///
+class MachineConstantPoolValue {
+  virtual void anchor();
+
+  Type *Ty;
+
+public:
+  explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
+  virtual ~MachineConstantPoolValue() = default;
+
+  /// getType - get type of this MachineConstantPoolValue.
+  ///
+  Type *getType() const { return Ty; }
+
+  virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+                                        unsigned Alignment) = 0;
+
+  virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
+
+  /// print - Implement operator<<
+  virtual void print(raw_ostream &O) const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineConstantPoolValue &V) {
+  V.print(OS);
+  return OS;
+}
+
+/// This class is a data container for one entry in a MachineConstantPool.
+/// It contains a pointer to the value and an offset from the start of
+/// the constant pool.
+/// @brief An entry in a MachineConstantPool
+class MachineConstantPoolEntry {
+public:
+  /// The constant itself.
+  union {
+    const Constant *ConstVal;
+    MachineConstantPoolValue *MachineCPVal;
+  } Val;
+
+  /// The required alignment for this entry. The top bit is set when Val is
+  /// a target specific MachineConstantPoolValue.
+  unsigned Alignment;
+
+  MachineConstantPoolEntry(const Constant *V, unsigned A)
+    : Alignment(A) {
+    Val.ConstVal = V;
+  }
+
+  MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
+      : Alignment(A) {
+    Val.MachineCPVal = V;
+    Alignment |= 1U << (sizeof(unsigned) * CHAR_BIT - 1);
+  }
+
+  /// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
+  /// is indeed a target-specific constant pool entry, not a wrapper over a
+  /// Constant.
+  bool isMachineConstantPoolEntry() const {
+    return (int)Alignment < 0;
+  }
+
+  int getAlignment() const {
+    return Alignment & ~(1U << (sizeof(unsigned) * CHAR_BIT - 1));
+  }
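+
+  // Worked example of the tag encoding above: with a 32-bit unsigned, a
+  // machine-specific entry with alignment 8 stores 0x80000008;
+  // isMachineConstantPoolEntry() then sees a negative value, and
+  // getAlignment() masks the tag bit away and returns 8.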
+
+  Type *getType() const;
+
+  /// This method classifies the entry according to whether or not it may
+  /// generate a relocation entry.  This must be conservative, so if it might
+  /// codegen to a relocatable entry, it should say so.
+  bool needsRelocation() const;
+
+  SectionKind getSectionKind(const DataLayout *DL) const;
+};
+
+/// The MachineConstantPool class keeps track of constants referenced by a
+/// function which must be spilled to memory.  This is used for constants which
+/// are unable to be used directly as operands to instructions, which typically
+/// include floating point and large integer constants.
+///
+/// Instructions reference the address of these constant pool constants through
+/// the use of MO_ConstantPoolIndex values.  When emitting assembly or machine
+/// code, these virtual address references are converted to refer to the
+/// address of the function constant pool values.
+/// @brief The machine constant pool.
+class MachineConstantPool {
+  unsigned PoolAlignment;       ///< The alignment for the pool.
+  std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
+  /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
+  DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
+  const DataLayout &DL;
+
+  const DataLayout &getDataLayout() const { return DL; }
+
+public:
+  /// @brief The only constructor.
+  explicit MachineConstantPool(const DataLayout &DL)
+      : PoolAlignment(1), DL(DL) {}
+  ~MachineConstantPool();
+
+  /// getConstantPoolAlignment - Return the alignment required by
+  /// the whole constant pool, to which the first element must be aligned.
+  unsigned getConstantPoolAlignment() const { return PoolAlignment; }
+
+  /// getConstantPoolIndex - Create a new entry in the constant pool or return
+  /// an existing one.  User must specify the minimum required alignment for
+  /// the object.
+  unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
+  unsigned getConstantPoolIndex(MachineConstantPoolValue *V,
+                                unsigned Alignment);
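+
+  // A minimal usage sketch (the surrounding lowering context is assumed):
+  //
+  //   MachineConstantPool *MCP = MF.getConstantPool();
+  //   unsigned Idx = MCP->getConstantPoolIndex(C, /*Alignment=*/4);
+  //   // Idx is then referenced through a MO_ConstantPoolIndex operand.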
+
+  /// isEmpty - Return true if this constant pool contains no constants.
+  bool isEmpty() const { return Constants.empty(); }
+
+  const std::vector<MachineConstantPoolEntry> &getConstants() const {
+    return Constants;
+  }
+
+  /// print - Used by the MachineFunction printer to print information about
+  /// constant pool objects.  Implemented in MachineFunction.cpp
+  void print(raw_ostream &OS) const;
+
+  /// dump - Call print(cerr) to be called from the debugger.
+  void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINECONSTANTPOOL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
new file mode 100644
index 0000000..ffbcc62
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominanceFrontier.h
@@ -0,0 +1,111 @@
+//===- llvm/CodeGen/MachineDominanceFrontier.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/DominanceFrontierImpl.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/GenericDomTree.h"
+#include <vector>
+
+namespace llvm {
+
+class MachineDominanceFrontier : public MachineFunctionPass {
+  ForwardDominanceFrontierBase<MachineBasicBlock> Base;
+
+public:
+  using DomTreeT = DomTreeBase<MachineBasicBlock>;
+  using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
+  using DomSetType =
+      DominanceFrontierBase<MachineBasicBlock, false>::DomSetType;
+  using iterator = DominanceFrontierBase<MachineBasicBlock, false>::iterator;
+  using const_iterator =
+      DominanceFrontierBase<MachineBasicBlock, false>::const_iterator;
+
+  MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
+  MachineDominanceFrontier &
+  operator=(const MachineDominanceFrontier &) = delete;
+
+  static char ID;
+
+  MachineDominanceFrontier();
+
+  DominanceFrontierBase<MachineBasicBlock, false> &getBase() { return Base; }
+
+  const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
+    return Base.getRoots();
+  }
+
+  MachineBasicBlock *getRoot() const {
+    return Base.getRoot();
+  }
+
+  bool isPostDominator() const {
+    return Base.isPostDominator();
+  }
+
+  iterator begin() {
+    return Base.begin();
+  }
+
+  const_iterator begin() const {
+    return Base.begin();
+  }
+
+  iterator end() {
+    return Base.end();
+  }
+
+  const_iterator end() const {
+    return Base.end();
+  }
+
+  iterator find(MachineBasicBlock *B) {
+    return Base.find(B);
+  }
+
+  const_iterator find(MachineBasicBlock *B) const {
+    return Base.find(B);
+  }
+
+  iterator addBasicBlock(MachineBasicBlock *BB, const DomSetType &frontier) {
+    return Base.addBasicBlock(BB, frontier);
+  }
+
+  void removeBlock(MachineBasicBlock *BB) {
+    return Base.removeBlock(BB);
+  }
+
+  void addToFrontier(iterator I, MachineBasicBlock *Node) {
+    return Base.addToFrontier(I, Node);
+  }
+
+  void removeFromFrontier(iterator I, MachineBasicBlock *Node) {
+    return Base.removeFromFrontier(I, Node);
+  }
+
+  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+    return Base.compareDomSet(DS1, DS2);
+  }
+
+  bool compare(DominanceFrontierBase<MachineBasicBlock, false> &Other) const {
+    return Base.compare(Other);
+  }
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  void releaseMemory() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
new file mode 100644
index 0000000..af642d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineDominators.h
@@ -0,0 +1,291 @@
+//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
+// but for target-specific code rather than target-independent IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEDOMINATORS_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
+#include <cassert>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+template <>
+inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
+    MachineBasicBlock *MBB) {
+  this->Roots.push_back(MBB);
+}
+
+extern template class DomTreeNodeBase<MachineBasicBlock>;
+extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
+extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
+
+using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
+
+//===-------------------------------------
+/// MachineDominatorTree - Concrete subclass of DominatorTreeBase that is used
+/// to compute a normal dominator tree.
+///
+class MachineDominatorTree : public MachineFunctionPass {
+  /// \brief Helper structure used to hold all the basic blocks
+  /// involved in the split of a critical edge.
+  struct CriticalEdge {
+    MachineBasicBlock *FromBB;
+    MachineBasicBlock *ToBB;
+    MachineBasicBlock *NewBB;
+  };
+
+  /// \brief Pile up all the critical edges to be split.
+  /// The splitting of a critical edge is local, and thus it is possible
+  /// to apply several of those changes at the same time.
+  mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
+
+  /// \brief Remember all the basic blocks that are inserted during
+  /// edge splitting.
+  /// Invariant: NewBBs == all the basic blocks contained in the NewBB
+  /// field of all the elements of CriticalEdgesToSplit.
+  /// I.e., for every elt in CriticalEdgesToSplit, there exists a BB in NewBBs
+  /// such that BB == elt.NewBB.
+  mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
+
+  /// The DominatorTreeBase that is used to compute a normal dominator tree
+  std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
+
+  /// \brief Apply all the recorded critical edges to the DT.
+  /// This updates the underlying DT information in a way that uses
+  /// the fast query path of DT as much as possible.
+  ///
+  /// \post CriticalEdgesToSplit.empty().
+  void applySplitCriticalEdges() const;
+
+public:
+  static char ID; // Pass ID, replacement for typeid
+
+  MachineDominatorTree();
+
+  DomTreeBase<MachineBasicBlock> &getBase() {
+    if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
+    applySplitCriticalEdges();
+    return *DT;
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// getRoots -  Return the root blocks of the current CFG.  This may include
+  /// multiple blocks if we are computing post dominators.  For forward
+  /// dominators, this will always be a single block (the entry node).
+  ///
+  inline const SmallVectorImpl<MachineBasicBlock*> &getRoots() const {
+    applySplitCriticalEdges();
+    return DT->getRoots();
+  }
+
+  inline MachineBasicBlock *getRoot() const {
+    applySplitCriticalEdges();
+    return DT->getRoot();
+  }
+
+  inline MachineDomTreeNode *getRootNode() const {
+    applySplitCriticalEdges();
+    return DT->getRootNode();
+  }
+
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  inline bool dominates(const MachineDomTreeNode* A,
+                        const MachineDomTreeNode* B) const {
+    applySplitCriticalEdges();
+    return DT->dominates(A, B);
+  }
+
+  inline bool dominates(const MachineBasicBlock* A,
+                        const MachineBasicBlock* B) const {
+    applySplitCriticalEdges();
+    return DT->dominates(A, B);
+  }
+
+  // dominates - Return true if A dominates B. This performs the
+  // special checks necessary if A and B are in the same basic block.
+  bool dominates(const MachineInstr *A, const MachineInstr *B) const {
+    applySplitCriticalEdges();
+    const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
+    if (BBA != BBB) return DT->dominates(BBA, BBB);
+
+    // Loop through the basic block until we find A or B.
+    MachineBasicBlock::const_iterator I = BBA->begin();
+    for (; &*I != A && &*I != B; ++I)
+      /*empty*/ ;
+
+    // A dominates B if it is found first in the basic block.
+    return &*I == A;
+  }
+
+  inline bool properlyDominates(const MachineDomTreeNode* A,
+                                const MachineDomTreeNode* B) const {
+    applySplitCriticalEdges();
+    return DT->properlyDominates(A, B);
+  }
+
+  inline bool properlyDominates(const MachineBasicBlock* A,
+                                const MachineBasicBlock* B) const {
+    applySplitCriticalEdges();
+    return DT->properlyDominates(A, B);
+  }
+
+  /// findNearestCommonDominator - Find nearest common dominator basic block
+  /// for basic block A and B. If there is no such block then return NULL.
+  inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+                                                       MachineBasicBlock *B) {
+    applySplitCriticalEdges();
+    return DT->findNearestCommonDominator(A, B);
+  }
+
+  inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+    applySplitCriticalEdges();
+    return DT->getNode(BB);
+  }
+
+  /// getNode - return the (Post)DominatorTree node for the specified basic
+  /// block.  This is the same as using operator[] on this class.
+  ///
+  inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+    applySplitCriticalEdges();
+    return DT->getNode(BB);
+  }
+
+  /// addNewBlock - Add a new node to the dominator tree information.  This
+  /// creates a new node as a child of DomBB's dominator node, linking it into
+  /// the children list of the immediate dominator.
+  inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
+                                         MachineBasicBlock *DomBB) {
+    applySplitCriticalEdges();
+    return DT->addNewBlock(BB, DomBB);
+  }
+
+  /// changeImmediateDominator - This method is used to update the dominator
+  /// tree information when a node's immediate dominator changes.
+  ///
+  inline void changeImmediateDominator(MachineBasicBlock *N,
+                                       MachineBasicBlock* NewIDom) {
+    applySplitCriticalEdges();
+    DT->changeImmediateDominator(N, NewIDom);
+  }
+
+  inline void changeImmediateDominator(MachineDomTreeNode *N,
+                                       MachineDomTreeNode* NewIDom) {
+    applySplitCriticalEdges();
+    DT->changeImmediateDominator(N, NewIDom);
+  }
+
+  /// eraseNode - Removes a node from the dominator tree. Block must not
+  /// dominate any other blocks. Removes node from its immediate dominator's
+  /// children list. Deletes dominator node associated with basic block BB.
+  inline void eraseNode(MachineBasicBlock *BB) {
+    applySplitCriticalEdges();
+    DT->eraseNode(BB);
+  }
+
+  /// splitBlock - BB is split and now it has one successor. Update dominator
+  /// tree to reflect this change.
+  inline void splitBlock(MachineBasicBlock* NewBB) {
+    applySplitCriticalEdges();
+    DT->splitBlock(NewBB);
+  }
+
+  /// isReachableFromEntry - Return true if A is dominated by the entry
+  /// block of the function containing it.
+  bool isReachableFromEntry(const MachineBasicBlock *A) {
+    applySplitCriticalEdges();
+    return DT->isReachableFromEntry(A);
+  }
+
+  void releaseMemory() override;
+
+  void verifyAnalysis() const override;
+
+  void print(raw_ostream &OS, const Module*) const override;
+
+  /// \brief Record that the critical edge (FromBB, ToBB) has been
+  /// split with NewBB.
+  /// It is best to use this method instead of updating the underlying
+  /// information directly, because it helps reduce the number of times the
+  /// DT information is invalidated.
+  ///
+  /// \note Do not use this method with regular edges.
+  ///
+  /// \note To benefit from the compile time improvement this method offers,
+  /// its users have to avoid querying the DT interface between two edge
+  /// splits. In other words, they have to batch the splitting of critical
+  /// edges as much as possible.
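+  ///
+  /// A typical (illustrative) batching pattern, with SplitEdges a
+  /// hypothetical container of recorded splits:
+  /// \code
+  ///   for (const auto &E : SplitEdges)
+  ///     MDT.recordSplitCriticalEdge(E.From, E.To, E.New);
+  ///   // The first query from here on applies all recorded splits at once.
+  ///   MDT.dominates(A, B);
+  /// \endcode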
+  void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
+                              MachineBasicBlock *ToBB,
+                              MachineBasicBlock *NewBB) {
+    bool Inserted = NewBBs.insert(NewBB).second;
+    (void)Inserted;
+    assert(Inserted &&
+           "A basic block inserted via edge splitting cannot appear twice");
+    CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
+  }
+};
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterated over by generic graph iterators.
+///
+
+template <class Node, class ChildIterator>
+struct MachineDomTreeGraphTraitsBase {
+  using NodeRef = Node *;
+  using ChildIteratorType = ChildIterator;
+
+  static NodeRef getEntryNode(NodeRef N) { return N; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+template <class T> struct GraphTraits;
+
+template <>
+struct GraphTraits<MachineDomTreeNode *>
+    : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
+                                           MachineDomTreeNode::iterator> {};
+
+template <>
+struct GraphTraits<const MachineDomTreeNode *>
+    : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
+                                           MachineDomTreeNode::const_iterator> {
+};
+
+template <> struct GraphTraits<MachineDominatorTree*>
+  : public GraphTraits<MachineDomTreeNode *> {
+  static NodeRef getEntryNode(MachineDominatorTree *DT) {
+    return DT->getRootNode();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
new file mode 100644
index 0000000..f887517
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFrameInfo.h
@@ -0,0 +1,724 @@
+//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The file defines the MachineFrameInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
+#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+class raw_ostream;
+class MachineFunction;
+class MachineBasicBlock;
+class BitVector;
+class AllocaInst;
+
+/// The CalleeSavedInfo class tracks the information needed to locate where a
+/// callee saved register is in the current frame.
+class CalleeSavedInfo {
+  unsigned Reg;
+  int FrameIdx;
+  /// Flag indicating whether the register is actually restored in the epilog.
+  /// In most cases, if a register is saved, it is also restored. There are
+  /// some situations, though, when this is not the case. For example, the
+  /// LR register on ARM is usually saved, but on exit from the function its
+  /// saved value may be loaded directly into PC. Since liveness tracking of
+  /// physical registers treats callee-saved registers as live outside of
+  /// the function, LR would be treated as live-on-exit, even though in these
+  /// scenarios it is not. This flag is added to indicate that the saved
+  /// register described by this object is not restored in the epilog.
+  /// The long-term solution is to model the liveness of callee-saved registers
+  /// by implicit uses on the return instructions, however, the required
+  /// changes in the ARM backend would be quite extensive.
+  bool Restored;
+
+public:
+  explicit CalleeSavedInfo(unsigned R, int FI = 0)
+  : Reg(R), FrameIdx(FI), Restored(true) {}
+
+  // Accessors.
+  unsigned getReg()                        const { return Reg; }
+  int getFrameIdx()                        const { return FrameIdx; }
+  void setFrameIdx(int FI)                       { FrameIdx = FI; }
+  bool isRestored()                        const { return Restored; }
+  void setRestored(bool R)                       { Restored = R; }
+};
+
+/// The MachineFrameInfo class represents an abstract stack frame until
+/// prolog/epilog code is inserted.  This class is key to allowing stack frame
+/// representation optimizations, such as frame pointer elimination.  It also
+/// allows more mundane (but still important) optimizations, such as reordering
+/// of abstract objects on the stack frame.
+///
+/// To support this, the class assigns unique integer identifiers to stack
+/// objects requested by clients.  These identifiers are negative integers for
+/// fixed stack objects (such as arguments passed on the stack) or nonnegative
+/// for objects that may be reordered.  Instructions which refer to stack
+/// objects use a special MO_FrameIndex operand to represent these frame
+/// indexes.
+///
+/// Because this class keeps track of all references to the stack frame, it
+/// knows when a variable sized object is allocated on the stack.  This is the
+/// sole condition which prevents frame pointer elimination, which is an
+/// important optimization on register-poor architectures.  Because original
+/// variable sized alloca's in the source program are the only source of
+/// variable sized stack objects, it is safe to decide whether there will be
+/// any variable sized objects before all stack objects are known (for
+/// example, register allocator spill code never needs variable sized
+/// objects).
+///
+/// When prolog/epilog code emission is performed, the final stack frame is
+/// built and the machine instructions are modified to refer to the actual
+/// stack offsets of the object, eliminating all MO_FrameIndex operands from
+/// the program.
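+///
+/// As an illustrative sketch (surrounding pass state assumed), a spill slot
+/// is typically created and then addressed through its index:
+/// \code
+///   MachineFrameInfo &MFI = MF.getFrameInfo();
+///   int FI = MFI.CreateSpillStackObject(Size, Align); // Size/Align assumed
+///   TII->storeRegToStackSlot(MBB, InsertPt, Reg, /*isKill=*/true, FI, RC,
+///                            TRI);
+/// \endcode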
+///
+/// @brief Abstract Stack Frame Information
+class MachineFrameInfo {
+
+  // Represent a single object allocated on the stack.
+  struct StackObject {
+    // The offset of this object from the stack pointer on entry to
+    // the function.  This field has no meaning for a variable sized element.
+    int64_t SPOffset;
+
+    // The size of this object on the stack. 0 means a variable sized object,
+    // ~0ULL means a dead object.
+    uint64_t Size;
+
+    // The required alignment of this stack slot.
+    unsigned Alignment;
+
+    // If true, the value of the stack object is set before
+    // entering the function and is not modified inside the function. By
+    // default, fixed objects are immutable unless marked otherwise.
+    bool isImmutable;
+
+    // If true the stack object is used as spill slot. It
+    // cannot alias any other memory objects.
+    bool isSpillSlot;
+
+    /// If true, this stack slot is used to spill a value (could be deopt
+    /// and/or GC related) over a statepoint. We know that the address of the
+    /// slot can't alias any LLVM IR value.  This is very similar to a spill
+    /// slot, but is created by statepoint lowering in SelectionDAG, not the
+    /// register allocator.
+    bool isStatepointSpillSlot = false;
+
+    /// Identifier for stack memory type analogous to address space. If this
+    /// is non-0, the meaning is target defined. Offsets cannot be directly
+    /// compared between objects with different stack IDs. The object may not
+    /// necessarily reside in the same contiguous memory block as other stack
+    /// objects. Objects with differing stack IDs should not be merged or
+    /// substituted for each other.
+    uint8_t StackID;
+
+    /// If this stack object originates from an Alloca instruction, this
+    /// value holds the original IR allocation. Can be null.
+    const AllocaInst *Alloca;
+
+    // If true, the object was mapped into the local frame
+    // block and doesn't need additional handling for allocation beyond that.
+    bool PreAllocated = false;
+
+    // If true, an LLVM IR value might point to this object.
+    // Normally, spill slots and fixed-offset objects don't alias IR-accessible
+    // objects, but there are exceptions (on PowerPC, for example, some byval
+    // arguments have ABI-prescribed offsets).
+    bool isAliased;
+
+    /// If true, the object has been zero-extended.
+    bool isZExt = false;
+
+    /// If true, the object has been sign-extended.
+    bool isSExt = false;
+
+    StackObject(uint64_t Size, unsigned Alignment, int64_t SPOffset,
+                bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
+                bool IsAliased, uint8_t StackID = 0)
+      : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
+        isImmutable(IsImmutable), isSpillSlot(IsSpillSlot),
+        StackID(StackID), Alloca(Alloca), isAliased(IsAliased) {}
+  };
+
+  /// The alignment of the stack.
+  unsigned StackAlignment;
+
+  /// Can the stack be realigned. This can be false if the target does not
+  /// support stack realignment, or if the user asks us not to realign the
+  /// stack. In this situation, overaligned allocas are all treated as dynamic
+  /// allocations and the target must handle them as part of DYNAMIC_STACKALLOC
+  /// lowering. All non-alloca stack objects have their alignment clamped to the
+  /// base ABI stack alignment.
+  /// FIXME: There is room for improvement in this case, in terms of
+  /// grouping overaligned allocas into a "secondary stack frame" and
+  /// then only use a single alloca to allocate this frame and only a
+  /// single virtual register to access it. Currently, without such an
+  /// optimization, each such alloca gets its own dynamic realignment.
+  bool StackRealignable;
+
+  /// Whether the function has the \c alignstack attribute.
+  bool ForcedRealign;
+
+  /// The list of stack objects allocated.
+  std::vector<StackObject> Objects;
+
+  /// This contains the number of fixed objects contained on
+  /// the stack.  Because fixed objects are stored at a negative index in the
+  /// Objects list, this is also the index to the 0th object in the list.
+  unsigned NumFixedObjects = 0;
+
+  /// This boolean keeps track of whether any variable
+  /// sized objects have been allocated yet.
+  bool HasVarSizedObjects = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.frameaddress.
+  bool FrameAddressTaken = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.returnaddress.
+  bool ReturnAddressTaken = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.experimental.stackmap.
+  bool HasStackMap = false;
+
+  /// This boolean keeps track of whether there is a call
+  /// to builtin \@llvm.experimental.patchpoint.
+  bool HasPatchPoint = false;
+
+  /// The prolog/epilog code inserter calculates the final stack
+  /// offsets for all of the fixed size objects, updating the Objects list
+  /// above.  It then updates StackSize to contain the number of bytes that need
+  /// to be allocated on entry to the function.
+  uint64_t StackSize = 0;
+
+  /// The amount that a frame offset needs to be adjusted to
+  /// have the actual offset from the stack/frame pointer.  The exact usage of
+  /// this is target-dependent, but it is typically used to adjust between
+  /// SP-relative and FP-relative offsets.  E.g., if objects are accessed via
+  /// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
+  /// to the distance between the initial SP and the value in FP.  For many
+  /// targets, this value is only used when generating debug info (via
+  /// TargetRegisterInfo::getFrameIndexReference); when generating code, the
+  /// corresponding adjustments are performed directly.
+  int OffsetAdjustment = 0;
+
+  /// The prolog/epilog code inserter may process objects that require greater
+  /// alignment than the default alignment the target provides.
+  /// To handle this, MaxAlignment is set to the maximum alignment
+  /// needed by the objects on the current frame.  If this is greater than the
+  /// native alignment maintained by the compiler, dynamic alignment code will
+  /// be needed.
+  ///
+  unsigned MaxAlignment = 0;
+
+  /// Set to true if this function adjusts the stack -- e.g.,
+  /// when calling another function. This is only valid during and after
+  /// prolog/epilog code insertion.
+  bool AdjustsStack = false;
+
+  /// Set to true if this function has any function calls.
+  bool HasCalls = false;
+
+  /// The frame index for the stack protector.
+  int StackProtectorIdx = -1;
+
+  /// The frame index for the function context. Used for SjLj exceptions.
+  int FunctionContextIdx = -1;
+
+  /// This contains the size of the largest call frame if the target uses frame
+  /// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
+  /// class).  This information is important for frame pointer elimination.
+  /// It is only valid during and after prolog/epilog code insertion.
+  unsigned MaxCallFrameSize = ~0u;
+
+  /// The prolog/epilog code inserter fills in this vector with each
+  /// callee saved register saved in the frame.  Beyond its use by the prolog/
+  /// epilog code inserter, this data is used for debug info and exception
+  /// handling.
+  std::vector<CalleeSavedInfo> CSInfo;
+
+  /// Has CSInfo been set yet?
+  bool CSIValid = false;
+
+  /// References to frame indices which are mapped
+  /// into the local frame allocation block. <FrameIdx, LocalOffset>
+  SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
+
+  /// Size of the pre-allocated local frame block.
+  int64_t LocalFrameSize = 0;
+
+  /// Required alignment of the local object blob, which is the strictest
+  /// alignment of any object in it.
+  unsigned LocalFrameMaxAlign = 0;
+
+  /// Whether the local object blob needs to be allocated together. If not,
+  /// PEI should ignore the isPreAllocated flags on the stack objects and
+  /// just allocate them normally.
+  bool UseLocalStackAllocationBlock = false;
+
+  /// True if the function dynamically adjusts the stack pointer through some
+  /// opaque mechanism like inline assembly or Win32 EH.
+  bool HasOpaqueSPAdjustment = false;
+
+  /// True if the function contains operations which will lower down to
+  /// instructions which manipulate the stack pointer.
+  bool HasCopyImplyingStackAdjustment = false;
+
+  /// True if the function contains a call to the llvm.vastart intrinsic.
+  bool HasVAStart = false;
+
+  /// True if this is a varargs function that contains a musttail call.
+  bool HasMustTailInVarArgFunc = false;
+
+  /// True if this function contains a tail call. If so, immutable objects
+  /// like function arguments are no longer truly immutable: a tail call *can*
+  /// overwrite fixed stack objects like arguments, so we can't treat them as
+  /// immutable.
+
+  /// Not null, if shrink-wrapping found a better place for the prologue.
+  MachineBasicBlock *Save = nullptr;
+  /// Not null, if shrink-wrapping found a better place for the epilogue.
+  MachineBasicBlock *Restore = nullptr;
+
+public:
+  explicit MachineFrameInfo(unsigned StackAlignment, bool StackRealignable,
+                            bool ForcedRealign)
+      : StackAlignment(StackAlignment), StackRealignable(StackRealignable),
+        ForcedRealign(ForcedRealign) {}
+
+  /// Return true if there are any stack objects in this function.
+  bool hasStackObjects() const { return !Objects.empty(); }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if the stack frame for this function
+  /// contains any variable sized objects.
+  bool hasVarSizedObjects() const { return HasVarSizedObjects; }
+
+  /// Return the index for the stack protector object.
+  int getStackProtectorIndex() const { return StackProtectorIdx; }
+  void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
+  bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }
+
+  /// Return the index for the function context object.
+  /// This object is used for SjLj exceptions.
+  int getFunctionContextIndex() const { return FunctionContextIdx; }
+  void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to
+  /// \@llvm.frameaddress in this function.
+  bool isFrameAddressTaken() const { return FrameAddressTaken; }
+  void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
+
+  /// This method may be called any time after
+  /// instruction selection is complete to determine if there is a call to
+  /// \@llvm.returnaddress in this function.
+  bool isReturnAddressTaken() const { return ReturnAddressTaken; }
+  void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to builtin
+  /// \@llvm.experimental.stackmap.
+  bool hasStackMap() const { return HasStackMap; }
+  void setHasStackMap(bool s = true) { HasStackMap = s; }
+
+  /// This method may be called any time after instruction
+  /// selection is complete to determine if there is a call to builtin
+  /// \@llvm.experimental.patchpoint.
+  bool hasPatchPoint() const { return HasPatchPoint; }
+  void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }
+
+  /// Return the minimum frame object index.
+  int getObjectIndexBegin() const { return -NumFixedObjects; }
+
+  /// Return one past the maximum frame object index.
+  int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
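+
+  // Illustrative loop over every frame object, fixed (negative indices) and
+  // ordinary alike:
+  //
+  //   for (int FI = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
+  //        FI != E; ++FI)
+  //     if (!MFI.isDeadObjectIndex(FI))
+  //       ...;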
+
+  /// Return the number of fixed objects.
+  unsigned getNumFixedObjects() const { return NumFixedObjects; }
+
+  /// Return the number of objects.
+  unsigned getNumObjects() const { return Objects.size(); }
+
+  /// Map a frame index into the local object block
+  void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
+    LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
+    Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
+  }
+
+  /// Get the local offset mapping for an object.
+  std::pair<int, int64_t> getLocalFrameObjectMap(int i) const {
+    assert(i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
+           "Invalid local object reference!");
+    return LocalFrameObjects[i];
+  }
+
+  /// Return the number of objects allocated into the local object block.
+  int64_t getLocalFrameObjectCount() const { return LocalFrameObjects.size(); }
+
+  /// Set the size of the local object blob.
+  void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
+
+  /// Get the size of the local object blob.
+  int64_t getLocalFrameSize() const { return LocalFrameSize; }
+
+  /// Required alignment of the local object blob,
+  /// which is the strictest alignment of any object in it.
+  void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
+
+  /// Return the required alignment of the local object blob.
+  unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
+
+  /// Get whether the local allocation blob should be allocated together, or
+  /// whether PEI should allocate the locals in it directly.
+  bool getUseLocalStackAllocationBlock() const {
+    return UseLocalStackAllocationBlock;
+  }
+
+  /// setUseLocalStackAllocationBlock - Set whether the local allocation blob
+  /// should be allocated together, or whether PEI should allocate the locals
+  /// in it directly.
+  void setUseLocalStackAllocationBlock(bool v) {
+    UseLocalStackAllocationBlock = v;
+  }
+
+  /// Return true if the object was pre-allocated into the local block.
+  bool isObjectPreAllocated(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
+  }
+
+  /// Return the size of the specified object.
+  int64_t getObjectSize(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Size;
+  }
+
+  /// Change the size of the specified stack object.
+  void setObjectSize(int ObjectIdx, int64_t Size) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].Size = Size;
+  }
+
+  /// Return the alignment of the specified stack object.
+  unsigned getObjectAlignment(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Alignment;
+  }
+
+  /// setObjectAlignment - Change the alignment of the specified stack object.
+  void setObjectAlignment(int ObjectIdx, unsigned Align) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
+    ensureMaxAlignment(Align);
+  }
+
+  /// Return the underlying Alloca of the specified
+  /// stack object if it exists. Returns 0 if none exists.
+  const AllocaInst* getObjectAllocation(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Alloca;
+  }
+
+  /// Return the assigned stack offset of the specified object
+  /// from the incoming stack pointer.
+  int64_t getObjectOffset(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    assert(!isDeadObjectIndex(ObjectIdx) &&
+           "Getting frame offset for a dead object?");
+    return Objects[ObjectIdx+NumFixedObjects].SPOffset;
+  }
+
+  bool isObjectZExt(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isZExt;
+  }
+
+  void setObjectZExt(int ObjectIdx, bool IsZExt) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isZExt = IsZExt;
+  }
+
+  bool isObjectSExt(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isSExt;
+  }
+
+  void setObjectSExt(int ObjectIdx, bool IsSExt) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isSExt = IsSExt;
+  }
+
+  /// Set the stack frame offset of the specified object. The
+  /// offset is relative to the stack pointer on entry to the function.
+  void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    assert(!isDeadObjectIndex(ObjectIdx) &&
+           "Setting frame offset for a dead object?");
+    Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
+  }
+
+  /// Return the number of bytes that must be allocated to hold
+  /// all of the fixed size frame objects.  This is only valid after
+  /// Prolog/Epilog code insertion has finalized the stack frame layout.
+  uint64_t getStackSize() const { return StackSize; }
+
+  /// Set the size of the stack.
+  void setStackSize(uint64_t Size) { StackSize = Size; }
+
+  /// Estimate and return the size of the stack frame.
+  unsigned estimateStackSize(const MachineFunction &MF) const;
+
+  /// Return the correction for frame offsets.
+  int getOffsetAdjustment() const { return OffsetAdjustment; }
+
+  /// Set the correction for frame offsets.
+  void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
+
+  /// Return the alignment in bytes that this function must be aligned to,
+  /// which is greater than the default stack alignment provided by the target.
+  unsigned getMaxAlignment() const { return MaxAlignment; }
+
+  /// Make sure the function is at least Align bytes aligned.
+  void ensureMaxAlignment(unsigned Align);
+
+  /// Return true if this function adjusts the stack -- e.g.,
+  /// when calling another function. This is only valid during and after
+  /// prolog/epilog code insertion.
+  bool adjustsStack() const { return AdjustsStack; }
+  void setAdjustsStack(bool V) { AdjustsStack = V; }
+
+  /// Return true if the current function has any function calls.
+  bool hasCalls() const { return HasCalls; }
+  void setHasCalls(bool V) { HasCalls = V; }
+
+  /// Returns true if the function contains opaque dynamic stack adjustments.
+  bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
+  void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }
+
+  /// Returns true if the function contains operations which will lower down to
+  /// instructions which manipulate the stack pointer.
+  bool hasCopyImplyingStackAdjustment() const {
+    return HasCopyImplyingStackAdjustment;
+  }
+  void setHasCopyImplyingStackAdjustment(bool B) {
+    HasCopyImplyingStackAdjustment = B;
+  }
+
+  /// Returns true if the function calls the llvm.va_start intrinsic.
+  bool hasVAStart() const { return HasVAStart; }
+  void setHasVAStart(bool B) { HasVAStart = B; }
+
+  /// Returns true if the function is variadic and contains a musttail call.
+  bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
+  void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }
+
+  /// Returns true if the function contains a tail call.
+  bool hasTailCall() const { return HasTailCall; }
+  void setHasTailCall() { HasTailCall = true; }
+
+  /// Computes the maximum size of a callframe and the AdjustsStack property.
+  /// This only works for targets defining
+  /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
+  /// and getFrameSize().
+  /// This is usually computed by the prologue/epilogue inserter but some
+  /// targets may call this to compute it earlier.
+  void computeMaxCallFrameSize(const MachineFunction &MF);
+
+  /// Return the maximum size of a call frame that must be
+  /// allocated for an outgoing function call.  This is only available if
+  /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
+  /// then only during or after prolog/epilog code insertion.
+  ///
+  unsigned getMaxCallFrameSize() const {
+    // TODO: Enable this assert when targets are fixed.
+    //assert(isMaxCallFrameSizeComputed() && "MaxCallFrameSize not computed yet");
+    if (!isMaxCallFrameSizeComputed())
+      return 0;
+    return MaxCallFrameSize;
+  }
+  bool isMaxCallFrameSizeComputed() const {
+    return MaxCallFrameSize != ~0u;
+  }
+  void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+
+  /// Create a new object at a fixed location on the stack.
+  /// All fixed objects should be created before other objects are created for
+  /// efficiency. By default, fixed objects are not pointed to by LLVM IR
+  /// values. This returns an index with a negative value.
+  int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable,
+                        bool isAliased = false);
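+
+  // E.g. (illustrative numbers) an 8-byte argument passed at [SP+16] on
+  // entry:
+  //
+  //   int FI = MFI.CreateFixedObject(/*Size=*/8, /*SPOffset=*/16,
+  //                                  /*IsImmutable=*/true);
+  //
+  // The returned FI is negative; see getObjectIndexBegin().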
+
+  /// Create a spill slot at a fixed location on the stack.
+  /// Returns an index with a negative value.
+  int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset,
+                                  bool IsImmutable = false);
+
+  /// Returns true if the specified index corresponds to a fixed stack object.
+  bool isFixedObjectIndex(int ObjectIdx) const {
+    return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
+  }
+
+  /// Returns true if the specified index corresponds
+  /// to an object that might be pointed to by an LLVM IR value.
+  bool isAliasedObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isAliased;
+  }
+
+  /// Returns true if the specified index corresponds to an immutable object.
+  bool isImmutableObjectIndex(int ObjectIdx) const {
+    // Tail calling functions can clobber their function arguments.
+    if (HasTailCall)
+      return false;
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isImmutable;
+  }
+
+  /// Marks the immutability of an object.
+  void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isImmutable = IsImmutable;
+  }
+
+  /// Returns true if the specified index corresponds to a spill slot.
+  bool isSpillSlotObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
+  }
+
+  bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot;
+  }
+
+  /// \see StackID
+  uint8_t getStackID(int ObjectIdx) const {
+    return Objects[ObjectIdx+NumFixedObjects].StackID;
+  }
+
+  /// \see StackID
+  void setStackID(int ObjectIdx, uint8_t ID) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].StackID = ID;
+  }
+
+  /// Returns true if the specified index corresponds to a dead object.
+  bool isDeadObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
+  }
+
+  /// Returns true if the specified index corresponds to a variable sized
+  /// object.
+  bool isVariableSizedObjectIndex(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx + NumFixedObjects].Size == 0;
+  }
+
+  void markAsStatepointSpillSlotObjectIndex(int ObjectIdx) {
+    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot = true;
+    assert(isStatepointSpillSlotObjectIndex(ObjectIdx) && "inconsistent");
+  }
+
+  /// Create a new statically sized stack object, returning
+  /// a nonnegative identifier to represent it.
+  int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot,
+                        const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
+
+  /// Create a new statically sized stack object that represents a spill slot,
+  /// returning a nonnegative identifier to represent it.
+  int CreateSpillStackObject(uint64_t Size, unsigned Alignment);
+
+  /// Remove or mark dead a statically sized stack object.
+  void RemoveStackObject(int ObjectIdx) {
+    // Mark it dead.
+    Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
+  }
+
+  /// Notify the MachineFrameInfo object that a variable sized object has been
+  /// created.  This must be called whenever a variable sized object is
+  /// created, whether or not the index returned is actually used.
+  int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca);
+
+  /// Returns a reference to the callee saved info vector for the current
+  /// function.
+  const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
+    return CSInfo;
+  }
+  /// \copydoc getCalleeSavedInfo()
+  std::vector<CalleeSavedInfo> &getCalleeSavedInfo() { return CSInfo; }
+
+  /// Used by prolog/epilog inserter to set the function's callee saved
+  /// information.
+  void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
+    CSInfo = CSI;
+  }
+
+  /// Has the callee saved info been calculated yet?
+  bool isCalleeSavedInfoValid() const { return CSIValid; }
+
+  void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
+
+  MachineBasicBlock *getSavePoint() const { return Save; }
+  void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
+  MachineBasicBlock *getRestorePoint() const { return Restore; }
+  void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }
+
+  /// Return a set of physical registers that are pristine.
+  ///
+  /// Pristine registers hold a value that is useless to the current function,
+  /// but that must be preserved - they are callee-saved registers that have
+  /// not been saved.
+  ///
+  /// Before the PrologueEpilogueInserter has placed the CSR spill code, this
+  /// method always returns an empty set.
+  BitVector getPristineRegs(const MachineFunction &MF) const;
+
+  /// Used by the MachineFunction printer to print information about
+  /// stack objects. Implemented in MachineFunction.cpp.
+  void print(const MachineFunction &MF, raw_ostream &OS) const;
+
+  /// dump - Print the function to stderr.
+  void dump(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
new file mode 100644
index 0000000..7d8b7eb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunction.h
@@ -0,0 +1,948 @@
+//===- llvm/CodeGen/MachineFunction.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect native machine code for a function.  This class contains a list of
+// MachineBasicBlock instances that make up the current compiled function.
+//
+// This class also contains pointers to various classes which hold
+// target-specific information about the generated code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
+#define LLVM_CODEGEN_MACHINEFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Recycler.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockAddress;
+class DataLayout;
+class DIExpression;
+class DILocalVariable;
+class DILocation;
+class Function;
+class GlobalValue;
+class MachineConstantPool;
+class MachineFrameInfo;
+class MachineFunction;
+class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class MCContext;
+class MCInstrDesc;
+class Pass;
+class PseudoSourceValueManager;
+class raw_ostream;
+class SlotIndexes;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetSubtargetInfo;
+struct WinEHFuncInfo;
+
+template <> struct ilist_alloc_traits<MachineBasicBlock> {
+  void deleteNode(MachineBasicBlock *MBB);
+};
+
+template <> struct ilist_callback_traits<MachineBasicBlock> {
+  void addNodeToList(MachineBasicBlock* MBB);
+  void removeNodeFromList(MachineBasicBlock* MBB);
+
+  template <class Iterator>
+  void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
+    llvm_unreachable("Never transfer between lists");
+  }
+};
+
+/// MachineFunctionInfo - This class can be derived from and used by targets to
+/// hold private target-specific information for each MachineFunction.  Objects
+/// of this type are accessed/created with MF::getInfo and destroyed when the
+/// MachineFunction is destroyed.
+struct MachineFunctionInfo {
+  virtual ~MachineFunctionInfo();
+
+  /// \brief Factory function: default behavior is to call new using the
+  /// supplied allocator.
+  ///
+  /// This function can be overridden in a derived class.
+  template<typename Ty>
+  static Ty *create(BumpPtrAllocator &Allocator, MachineFunction &MF) {
+    return new (Allocator.Allocate<Ty>()) Ty(MF);
+  }
+};
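+
+// Illustrative sketch (not part of this header): a target derives from
+// MachineFunctionInfo to hold per-function private state, and retrieves it
+// lazily through MF.getInfo<...>() declared further below. The type and
+// field names here are hypothetical.
+//
+//   struct MyTargetFunctionInfo : public MachineFunctionInfo {
+//     int VarArgsFrameIndex = 0;               // example target-private state
+//     explicit MyTargetFunctionInfo(MachineFunction &MF) {}
+//   };
+//   // Created on first use via the default create<> factory; owned and
+//   // destroyed by the MachineFunction.
+//   auto *FI = MF.getInfo<MyTargetFunctionInfo>();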
+
+/// Properties which a MachineFunction may have at a given point in time.
+/// Each of these has checking code in the MachineVerifier, and passes can
+/// require that a property be set.
+class MachineFunctionProperties {
+  // Possible TODO: Allow targets to extend this (perhaps by allowing the
+  // constructor to specify the size of the bit vector)
+  // Possible TODO: Allow requiring the negative (e.g. VRegsAllocated could be
+  // stated as the negative of "has vregs").
+
+public:
+  // The properties are stated in "positive" form; i.e. a pass could require
+  // that the property hold, but not that it does not hold.
+
+  // Property descriptions:
+  // IsSSA: True when the machine function is in SSA form and virtual registers
+  //  have a single def.
+  // NoPHIs: The machine function does not contain any PHI instruction.
+  // TracksLiveness: True when tracking register liveness accurately.
+  //  While this property is set, register liveness information in basic block
+  //  live-in lists and machine instruction operands (e.g. kill flags, implicit
+  //  defs) is accurate. This means it can be used to change the code in ways
+  //  that affect the values in registers, for example by the register
+  //  scavenger.
+  //  When this property is clear, liveness is no longer reliable.
+  // NoVRegs: The machine function does not use any virtual registers.
+  // Legalized: In GlobalISel: the MachineLegalizer ran and all pre-isel generic
+  //  instructions have been legalized; i.e., all instructions are now one of:
+  //   - generic and always legal (e.g., COPY)
+  //   - target-specific
+  //   - legal pre-isel generic instructions.
+  // RegBankSelected: In GlobalISel: the RegBankSelect pass ran and all generic
+  //  virtual registers have been assigned to a register bank.
+  // Selected: In GlobalISel: the InstructionSelect pass ran and all pre-isel
+  //  generic instructions have been eliminated; i.e., all instructions are now
+  //  target-specific or non-pre-isel generic instructions (e.g., COPY).
+  //  Since only pre-isel generic instructions can have generic virtual register
+  //  operands, this also means that all generic virtual registers have been
+  //  constrained to virtual registers (assigned to register classes) and that
+  //  all sizes attached to them have been eliminated.
+  // FailedISel: Set when instruction selection in GlobalISel fails for the
+  //  function, so that a fallback path (e.g. SelectionDAG) can take over.
+  enum class Property : unsigned {
+    IsSSA,
+    NoPHIs,
+    TracksLiveness,
+    NoVRegs,
+    FailedISel,
+    Legalized,
+    RegBankSelected,
+    Selected,
+    LastProperty = Selected,
+  };
+
+  bool hasProperty(Property P) const {
+    return Properties[static_cast<unsigned>(P)];
+  }
+
+  MachineFunctionProperties &set(Property P) {
+    Properties.set(static_cast<unsigned>(P));
+    return *this;
+  }
+
+  MachineFunctionProperties &reset(Property P) {
+    Properties.reset(static_cast<unsigned>(P));
+    return *this;
+  }
+
+  /// Reset all the properties.
+  MachineFunctionProperties &reset() {
+    Properties.reset();
+    return *this;
+  }
+
+  MachineFunctionProperties &set(const MachineFunctionProperties &MFP) {
+    Properties |= MFP.Properties;
+    return *this;
+  }
+
+  MachineFunctionProperties &reset(const MachineFunctionProperties &MFP) {
+    Properties.reset(MFP.Properties);
+    return *this;
+  }
+
+  // Returns true if all properties set in V (i.e. required by a pass) are set
+  // in this.
+  bool verifyRequiredProperties(const MachineFunctionProperties &V) const {
+    return !V.Properties.test(Properties);
+  }
+
+  /// Print the MachineFunctionProperties in human-readable form.
+  void print(raw_ostream &OS) const;
+
+private:
+  BitVector Properties =
+      BitVector(static_cast<unsigned>(Property::LastProperty)+1);
+};
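+
+// Usage sketch (illustrative): a pass states the properties it requires, and
+// verifyRequiredProperties() checks such a requirement set against a
+// function's current properties.
+//
+//   MachineFunctionProperties Required;
+//   Required.set(MachineFunctionProperties::Property::NoVRegs);
+//   // True only if every property set in Required is also set on MF.
+//   bool Satisfied = MF.getProperties().verifyRequiredProperties(Required);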
+
+struct SEHHandler {
+  /// Filter or finally function. Null indicates a catch-all.
+  const Function *FilterOrFinally;
+
+  /// Address of block to recover at. Null for a finally handler.
+  const BlockAddress *RecoverBA;
+};
+
+/// This structure is used to retain landing pad info for the current function.
+struct LandingPadInfo {
+  MachineBasicBlock *LandingPadBlock;      // Landing pad block.
+  SmallVector<MCSymbol *, 1> BeginLabels;  // Labels prior to invoke.
+  SmallVector<MCSymbol *, 1> EndLabels;    // Labels after invoke.
+  SmallVector<SEHHandler, 1> SEHHandlers;  // SEH handlers active at this lpad.
+  MCSymbol *LandingPadLabel = nullptr;     // Label at beginning of landing pad.
+  std::vector<int> TypeIds;                // List of type ids (filters negative).
+
+  explicit LandingPadInfo(MachineBasicBlock *MBB)
+      : LandingPadBlock(MBB) {}
+};
+
+class MachineFunction {
+  const Function &F;
+  const TargetMachine &Target;
+  const TargetSubtargetInfo *STI;
+  MCContext &Ctx;
+  MachineModuleInfo &MMI;
+
+  // RegInfo - Information about each register in use in the function.
+  MachineRegisterInfo *RegInfo;
+
+  // Used to keep track of target-specific per-machine function information for
+  // the target implementation.
+  MachineFunctionInfo *MFInfo;
+
+  // Keep track of objects allocated on the stack.
+  MachineFrameInfo *FrameInfo;
+
+  // Keep track of constants which are spilled to memory
+  MachineConstantPool *ConstantPool;
+
+  // Keep track of jump tables for switch instructions
+  MachineJumpTableInfo *JumpTableInfo;
+
+  // Keeps track of Windows exception handling related data. This will be null
+  // for functions that aren't using a funclet-based EH personality.
+  WinEHFuncInfo *WinEHInfo = nullptr;
+
+  // Function-level unique numbering for MachineBasicBlocks.  When a
+  // MachineBasicBlock is inserted into a MachineFunction, it is automatically
+  // numbered, and this vector keeps track of the mapping from IDs to MBBs.
+  std::vector<MachineBasicBlock*> MBBNumbering;
+
+  // Pool-allocate MachineFunction-lifetime and IR objects.
+  BumpPtrAllocator Allocator;
+
+  // Allocation management for instructions in function.
+  Recycler<MachineInstr> InstructionRecycler;
+
+  // Allocation management for operand arrays on instructions.
+  ArrayRecycler<MachineOperand> OperandRecycler;
+
+  // Allocation management for basic blocks in function.
+  Recycler<MachineBasicBlock> BasicBlockRecycler;
+
+  // List of machine basic blocks in function
+  using BasicBlockListType = ilist<MachineBasicBlock>;
+  BasicBlockListType BasicBlocks;
+
+  /// FunctionNumber - This provides a unique ID for each function emitted in
+  /// this translation unit.
+  ///
+  unsigned FunctionNumber;
+
+  /// Alignment - The alignment of the function.
+  unsigned Alignment;
+
+  /// ExposesReturnsTwice - True if the function calls setjmp or related
+  /// functions with attribute "returns twice", but doesn't have
+  /// the attribute itself.
+  /// This is used to limit optimizations which cannot reason
+  /// about the control flow of such functions.
+  bool ExposesReturnsTwice = false;
+
+  /// True if the function includes any inline assembly.
+  bool HasInlineAsm = false;
+
+  /// True if any WinCFI instructions have been emitted in this function.
+  Optional<bool> HasWinCFI;
+
+  /// Current high-level properties of the IR of the function (e.g. whether it
+  /// is in SSA form or whether registers have been allocated).
+  MachineFunctionProperties Properties;
+
+  // Allocation management for pseudo source values.
+  std::unique_ptr<PseudoSourceValueManager> PSVManager;
+
+  /// List of moves done by a function's prolog.  Used to construct frame maps
+  /// by debug and exception handling consumers.
+  std::vector<MCCFIInstruction> FrameInstructions;
+
+  /// \name Exception Handling
+  /// \{
+
+  /// List of LandingPadInfo describing the landing pad information.
+  std::vector<LandingPadInfo> LandingPads;
+
+  /// Map a landing pad's EH symbol to the call site indexes.
+  DenseMap<MCSymbol*, SmallVector<unsigned, 4>> LPadToCallSiteMap;
+
+  /// Map of invoke call site index values to associated begin EH_LABEL.
+  DenseMap<MCSymbol*, unsigned> CallSiteMap;
+
+  /// CodeView label annotations.
+  std::vector<std::pair<MCSymbol *, MDNode *>> CodeViewAnnotations;
+
+  bool CallsEHReturn = false;
+  bool CallsUnwindInit = false;
+  bool HasEHFunclets = false;
+
+  /// List of C++ TypeInfo used.
+  std::vector<const GlobalValue *> TypeInfos;
+
+  /// List of typeids encoding filters used.
+  std::vector<unsigned> FilterIds;
+
+  /// List of the indices in FilterIds corresponding to filter terminators.
+  std::vector<unsigned> FilterEnds;
+
+  EHPersonality PersonalityTypeCache = EHPersonality::Unknown;
+
+  /// \}
+
+  /// Clear all the members of this MachineFunction, except the ones used to
+  /// initialize the MachineFunction again.
+  /// More specifically, this deallocates all the dynamically allocated
+  /// objects and gets rid of all the XXXInfo data structures, but keeps the
+  /// references to Fn, Target, MMI, and FunctionNumber unchanged.
+  void clear();
+  /// Allocate and initialize the different members.
+  /// In particular, the XXXInfo data structure.
+  /// \pre Fn, Target, MMI, and FunctionNumber are properly set.
+  void init();
+
+public:
+  struct VariableDbgInfo {
+    const DILocalVariable *Var;
+    const DIExpression *Expr;
+    unsigned Slot;
+    const DILocation *Loc;
+
+    VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+                    unsigned Slot, const DILocation *Loc)
+        : Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
+  };
+  using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
+  VariableDbgInfoMapTy VariableDbgInfos;
+
+  MachineFunction(const Function &F, const TargetMachine &TM,
+                  const TargetSubtargetInfo &STI, unsigned FunctionNum,
+                  MachineModuleInfo &MMI);
+  MachineFunction(const MachineFunction &) = delete;
+  MachineFunction &operator=(const MachineFunction &) = delete;
+  ~MachineFunction();
+
+  /// Reset the instance as if it was just created.
+  void reset() {
+    clear();
+    init();
+  }
+
+  MachineModuleInfo &getMMI() const { return MMI; }
+  MCContext &getContext() const { return Ctx; }
+
+  PseudoSourceValueManager &getPSVManager() const { return *PSVManager; }
+
+  /// Return the DataLayout attached to the Module associated with this MF.
+  const DataLayout &getDataLayout() const;
+
+  /// Return the LLVM function that this machine code represents
+  const Function &getFunction() const { return F; }
+
+  /// getName - Return the name of the corresponding LLVM function.
+  StringRef getName() const;
+
+  /// getFunctionNumber - Return a unique ID for the current function.
+  unsigned getFunctionNumber() const { return FunctionNumber; }
+
+  /// getTarget - Return the target machine this machine code is compiled with
+  const TargetMachine &getTarget() const { return Target; }
+
+  /// getSubtarget - Return the subtarget for which this machine code is being
+  /// compiled.
+  const TargetSubtargetInfo &getSubtarget() const { return *STI; }
+  void setSubtarget(const TargetSubtargetInfo *ST) { STI = ST; }
+
+  /// getSubtarget - This method returns a pointer to the specified type of
+  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
+  /// returned is of the correct type.
+  template<typename STC> const STC &getSubtarget() const {
+    return *static_cast<const STC *>(STI);
+  }
+
+  /// getRegInfo - Return information about the registers currently in use.
+  MachineRegisterInfo &getRegInfo() { return *RegInfo; }
+  const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
+
+  /// getFrameInfo - Return the frame info object for the current function.
+  /// This object contains information about objects allocated on the stack
+  /// frame of the current function in an abstract way.
+  MachineFrameInfo &getFrameInfo() { return *FrameInfo; }
+  const MachineFrameInfo &getFrameInfo() const { return *FrameInfo; }
+
+  /// getJumpTableInfo - Return the jump table info object for the current
+  /// function.  This object contains information about jump tables in the
+  /// current function.  If the current function has no jump tables, this will
+  /// return null.
+  const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
+  MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
+
+  /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
+  /// does not already exist, allocate one.
+  MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
+
+  /// getConstantPool - Return the constant pool object for the current
+  /// function.
+  MachineConstantPool *getConstantPool() { return ConstantPool; }
+  const MachineConstantPool *getConstantPool() const { return ConstantPool; }
+
+  /// getWinEHFuncInfo - Return information about how the current function uses
+  /// Windows exception handling. Returns null for functions that don't use
+  /// funclets for exception handling.
+  const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
+  WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
+
+  /// getAlignment - Return the alignment (log2, not bytes) of the function.
+  unsigned getAlignment() const { return Alignment; }
+
+  /// setAlignment - Set the alignment (log2, not bytes) of the function.
+  void setAlignment(unsigned A) { Alignment = A; }
+
+  /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
+  void ensureAlignment(unsigned A) {
+    if (Alignment < A) Alignment = A;
+  }
+
+  /// exposesReturnsTwice - Returns true if the function calls setjmp or
+  /// any other similar functions with attribute "returns twice" without
+  /// having the attribute itself.
+  bool exposesReturnsTwice() const {
+    return ExposesReturnsTwice;
+  }
+
+  /// setExposesReturnsTwice - Set a flag that indicates if there's a call to
+  /// a "returns twice" function.
+  void setExposesReturnsTwice(bool B) {
+    ExposesReturnsTwice = B;
+  }
+
+  /// Returns true if the function contains any inline assembly.
+  bool hasInlineAsm() const {
+    return HasInlineAsm;
+  }
+
+  /// Set a flag that indicates that the function contains inline assembly.
+  void setHasInlineAsm(bool B) {
+    HasInlineAsm = B;
+  }
+
+  bool hasWinCFI() const {
+    assert(HasWinCFI.hasValue() && "HasWinCFI not set yet!");
+    return *HasWinCFI;
+  }
+  void setHasWinCFI(bool v) { HasWinCFI = v; }
+
+  /// Get the function properties
+  const MachineFunctionProperties &getProperties() const { return Properties; }
+  MachineFunctionProperties &getProperties() { return Properties; }
+
+  /// getInfo - Keep track of various per-function pieces of information for
+  /// backends that would like to do so.
+  ///
+  template<typename Ty>
+  Ty *getInfo() {
+    if (!MFInfo)
+      MFInfo = Ty::template create<Ty>(Allocator, *this);
+    return static_cast<Ty*>(MFInfo);
+  }
+
+  template<typename Ty>
+  const Ty *getInfo() const {
+     return const_cast<MachineFunction*>(this)->getInfo<Ty>();
+  }
+
+  /// getBlockNumbered - MachineBasicBlocks are automatically numbered when
+  /// they are inserted into the machine function.  The block number for a
+  /// machine basic block can be found by using the MBB::getNumber method;
+  /// this method provides the inverse mapping.
+  MachineBasicBlock *getBlockNumbered(unsigned N) const {
+    assert(N < MBBNumbering.size() && "Illegal block number");
+    assert(MBBNumbering[N] && "Block was removed from the machine function!");
+    return MBBNumbering[N];
+  }
+
+  /// Should we be emitting segmented stack stuff for the function?
+  bool shouldSplitStack() const;
+
+  /// getNumBlockIDs - Return the number of MBB ID's allocated.
+  unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
+
+  /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
+  /// recomputes them.  This guarantees that the MBB numbers are sequential,
+  /// dense, and match the ordering of the blocks within the function.  If a
+  /// specific MachineBasicBlock is specified, only that block and those after
+  /// it are renumbered.
+  void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
+
+  /// print - Print out the MachineFunction in a format suitable for debugging
+  /// to the specified stream.
+  void print(raw_ostream &OS, const SlotIndexes* = nullptr) const;
+
+  /// viewCFG - This function is meant for use from the debugger.  You can just
+  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
+  /// program, displaying the CFG of the current function with the code for each
+  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
+  /// in your path.
+  void viewCFG() const;
+
+  /// viewCFGOnly - This function is meant for use from the debugger.  It works
+  /// just like viewCFG, but it does not include the contents of basic blocks
+  /// into the nodes, just the label.  If you are only interested in the CFG
+  /// this can make the graph smaller.
+  ///
+  void viewCFGOnly() const;
+
+  /// dump - Print the current MachineFunction to cerr, useful for debugger use.
+  void dump() const;
+
+  /// Run the current MachineFunction through the machine code verifier, useful
+  /// for debugger use.
+  /// \returns true if no problems were found.
+  bool verify(Pass *p = nullptr, const char *Banner = nullptr,
+              bool AbortOnError = true) const;
+
+  // Provide accessors for the MachineBasicBlock list...
+  using iterator = BasicBlockListType::iterator;
+  using const_iterator = BasicBlockListType::const_iterator;
+  using const_reverse_iterator = BasicBlockListType::const_reverse_iterator;
+  using reverse_iterator = BasicBlockListType::reverse_iterator;
+
+  /// Support for MachineBasicBlock::getNextNode().
+  static BasicBlockListType MachineFunction::*
+  getSublistAccess(MachineBasicBlock *) {
+    return &MachineFunction::BasicBlocks;
+  }
+
+  /// addLiveIn - Add the specified physical register as a live-in value and
+  /// create a corresponding virtual register for it.
+  unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
+
+  //===--------------------------------------------------------------------===//
+  // BasicBlock accessor functions.
+  //
+  iterator                 begin()       { return BasicBlocks.begin(); }
+  const_iterator           begin() const { return BasicBlocks.begin(); }
+  iterator                 end  ()       { return BasicBlocks.end();   }
+  const_iterator           end  () const { return BasicBlocks.end();   }
+
+  reverse_iterator        rbegin()       { return BasicBlocks.rbegin(); }
+  const_reverse_iterator  rbegin() const { return BasicBlocks.rbegin(); }
+  reverse_iterator        rend  ()       { return BasicBlocks.rend();   }
+  const_reverse_iterator  rend  () const { return BasicBlocks.rend();   }
+
+  unsigned                  size() const { return (unsigned)BasicBlocks.size();}
+  bool                     empty() const { return BasicBlocks.empty(); }
+  const MachineBasicBlock &front() const { return BasicBlocks.front(); }
+        MachineBasicBlock &front()       { return BasicBlocks.front(); }
+  const MachineBasicBlock & back() const { return BasicBlocks.back(); }
+        MachineBasicBlock & back()       { return BasicBlocks.back(); }
+
+  void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
+  void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
+  void insert(iterator MBBI, MachineBasicBlock *MBB) {
+    BasicBlocks.insert(MBBI, MBB);
+  }
+  void splice(iterator InsertPt, iterator MBBI) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
+  }
+  void splice(iterator InsertPt, MachineBasicBlock *MBB) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBB);
+  }
+  void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
+    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
+  }
+
+  void remove(iterator MBBI) { BasicBlocks.remove(MBBI); }
+  void remove(MachineBasicBlock *MBBI) { BasicBlocks.remove(MBBI); }
+  void erase(iterator MBBI) { BasicBlocks.erase(MBBI); }
+  void erase(MachineBasicBlock *MBBI) { BasicBlocks.erase(MBBI); }
+
+  template <typename Comp>
+  void sort(Comp comp) {
+    BasicBlocks.sort(comp);
+  }
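+
+  // Iteration sketch (illustrative): a MachineFunction is a list of
+  // MachineBasicBlocks, each of which is a list of MachineInstrs, so both
+  // levels support range-based for loops:
+  //
+  //   unsigned InstrCount = 0;            // hypothetical counter
+  //   for (const MachineBasicBlock &MBB : MF)
+  //     InstrCount += MBB.size();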
+
+  //===--------------------------------------------------------------------===//
+  // Internal functions used to automatically number MachineBasicBlocks
+
+  /// \brief Adds the MBB to the internal numbering. Returns the unique number
+  /// assigned to the MBB.
+  unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
+    MBBNumbering.push_back(MBB);
+    return (unsigned)MBBNumbering.size()-1;
+  }
+
+  /// removeFromMBBNumbering - Remove the specified machine basic block from
+  /// our tracker; this is only really to be used by the MachineBasicBlock
+  /// implementation.
+  void removeFromMBBNumbering(unsigned N) {
+    assert(N < MBBNumbering.size() && "Illegal basic block #");
+    MBBNumbering[N] = nullptr;
+  }
+
+  /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
+  /// of `new MachineInstr'.
+  MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
+                                   bool NoImp = false);
+
+  /// Create a new MachineInstr which is a copy of \p Orig, identical in all
+  /// ways except the instruction has no parent, prev, or next. Bundling flags
+  /// are reset.
+  ///
+  /// Note: Clones a single instruction, not whole instruction bundles.
+  /// Does not perform target specific adjustments; consider using
+  /// TargetInstrInfo::duplicate() instead.
+  MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
+
+  /// Clones the instruction or the whole instruction bundle \p Orig and
+  /// inserts it into \p MBB before \p InsertBefore.
+  ///
+  /// Note: Does not perform target specific adjustments; consider using
+  /// TargetInstrInfo::duplicate() instead.
+  MachineInstr &CloneMachineInstrBundle(MachineBasicBlock &MBB,
+      MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig);
+
+  /// DeleteMachineInstr - Delete the given MachineInstr.
+  void DeleteMachineInstr(MachineInstr *MI);
+
+  /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
+  /// instead of `new MachineBasicBlock'.
+  MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
+
+  /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
+  void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
+
+  /// getMachineMemOperand - Allocate a new MachineMemOperand.
+  /// MachineMemOperands are owned by the MachineFunction and need not be
+  /// explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+      unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      const MDNode *Ranges = nullptr,
+      SyncScope::ID SSID = SyncScope::System,
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
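+
+  // Allocation sketch (illustrative): create a memory operand describing a
+  // 4-byte, 4-byte-aligned load from fixed stack slot FI (FI is assumed to
+  // be in scope):
+  //
+  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
+  //       MachinePointerInfo::getFixedStack(MF, FI),
+  //       MachineMemOperand::MOLoad, /*s=*/4, /*base_alignment=*/4);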
+
+  /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
+  /// an existing one, adjusting by an offset and using the given size.
+  /// MachineMemOperands are owned by the MachineFunction and need not be
+  /// explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          int64_t Offset, uint64_t Size);
+
+  /// Allocate a new MachineMemOperand by copying an existing one,
+  /// replacing only AliasAnalysis information. MachineMemOperands are owned
+  /// by the MachineFunction and need not be explicitly deallocated.
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          const AAMDNodes &AAInfo);
+
+  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
+
+  /// Allocate an array of MachineOperands. This is only intended for use by
+  /// internal MachineInstr functions.
+  MachineOperand *allocateOperandArray(OperandCapacity Cap) {
+    return OperandRecycler.allocate(Cap, Allocator);
+  }
+
+  /// Deallocate an array of MachineOperands and recycle the memory. This is
+  /// only intended for use by internal MachineInstr functions.
+  /// Cap must be the same capacity that was used to allocate the array.
+  void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
+    OperandRecycler.deallocate(Cap, Array);
+  }
+
+  /// \brief Allocate and initialize a register mask with @p NumRegister bits.
+  uint32_t *allocateRegisterMask(unsigned NumRegister) {
+    unsigned Size = (NumRegister + 31) / 32;
+    uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
+    for (unsigned i = 0; i != Size; ++i)
+      Mask[i] = 0;
+    return Mask;
+  }
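+
+  // (Illustrative) The returned mask is zero-initialized; a caller marks
+  // register Reg as preserved in the usual regmask encoding with:
+  //   Mask[Reg / 32] |= 1u << (Reg % 32);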
+
+  /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
+  /// pointers.  This array is owned by the MachineFunction.
+  MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
+
+  /// extractLoadMemRefs - Allocate an array and populate it with just the
+  /// load information from the given MachineMemOperand sequence.
+  std::pair<MachineInstr::mmo_iterator,
+            MachineInstr::mmo_iterator>
+    extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
+                       MachineInstr::mmo_iterator End);
+
+  /// extractStoreMemRefs - Allocate an array and populate it with just the
+  /// store information from the given MachineMemOperand sequence.
+  std::pair<MachineInstr::mmo_iterator,
+            MachineInstr::mmo_iterator>
+    extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
+                        MachineInstr::mmo_iterator End);
+
+  /// Allocate a string and populate it with the given external symbol name.
+  const char *createExternalSymbolName(StringRef Name);
+
+  //===--------------------------------------------------------------------===//
+  // Label Manipulation.
+
+  /// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
+  /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
+  /// normal 'L' label is returned.
+  MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
+                         bool isLinkerPrivate = false) const;
+
+  /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
+  /// base.
+  MCSymbol *getPICBaseSymbol() const;
+
+  /// Returns a reference to the list of CFI instructions in the function's
+  /// prologue.  Used to construct frame maps for debug and exception handling
+  /// consumers.
+  const std::vector<MCCFIInstruction> &getFrameInstructions() const {
+    return FrameInstructions;
+  }
+
+  LLVM_NODISCARD unsigned addFrameInst(const MCCFIInstruction &Inst) {
+    FrameInstructions.push_back(Inst);
+    return FrameInstructions.size() - 1;
+  }
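+
+  // Typical use (illustrative): frame lowering records a CFI directive and
+  // points a CFI_INSTRUCTION pseudo at it. MBB, MBBI, DL and TII are assumed
+  // to be in scope.
+  //
+  //   unsigned CFIIndex = MF.addFrameInst(
+  //       MCCFIInstruction::createDefCfaOffset(nullptr, -16));
+  //   BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+  //       .addCFIIndex(CFIIndex);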
+
+  /// \name Exception Handling
+  /// \{
+
+  bool callsEHReturn() const { return CallsEHReturn; }
+  void setCallsEHReturn(bool b) { CallsEHReturn = b; }
+
+  bool callsUnwindInit() const { return CallsUnwindInit; }
+  void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+
+  bool hasEHFunclets() const { return HasEHFunclets; }
+  void setHasEHFunclets(bool V) { HasEHFunclets = V; }
+
+  /// Find or create a LandingPadInfo for the specified MachineBasicBlock.
+  LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
+
+  /// Remap landing pad labels and remove any deleted landing pads.
+  void tidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
+
+  /// Return a reference to the landing pad info for the current function.
+  const std::vector<LandingPadInfo> &getLandingPads() const {
+    return LandingPads;
+  }
+
+  /// Provide the begin and end labels of an invoke style call and associate it
+  /// with a try landing pad block.
+  void addInvoke(MachineBasicBlock *LandingPad,
+                 MCSymbol *BeginLabel, MCSymbol *EndLabel);
+
+  /// Add a new landing pad.  Returns the label ID for the landing pad entry.
+  MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
+
+  /// Provide the catch typeinfo for a landing pad.
+  void addCatchTypeInfo(MachineBasicBlock *LandingPad,
+                        ArrayRef<const GlobalValue *> TyInfo);
+
+  /// Provide the filter typeinfo for a landing pad.
+  void addFilterTypeInfo(MachineBasicBlock *LandingPad,
+                         ArrayRef<const GlobalValue *> TyInfo);
+
+  /// Add a cleanup action for a landing pad.
+  void addCleanup(MachineBasicBlock *LandingPad);
+
+  void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
+                          const BlockAddress *RecoverLabel);
+
+  void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
+                            const Function *Cleanup);
+
+  /// Return the type id for the specified typeinfo.  This is function wide.
+  unsigned getTypeIDFor(const GlobalValue *TI);
+
+  /// Return the id of the filter encoded by TyIds.  This is function wide.
+  int getFilterIDFor(std::vector<unsigned> &TyIds);
+
+  /// Map the landing pad's EH symbol to the call site indexes.
+  void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
+
+  /// Get the call site indexes for a landing pad EH symbol.
+  SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
+    assert(hasCallSiteLandingPad(Sym) &&
+           "missing call site number for landing pad!");
+    return LPadToCallSiteMap[Sym];
+  }
+
+  /// Return true if the landing pad EH symbol has an associated call site.
+  bool hasCallSiteLandingPad(MCSymbol *Sym) {
+    return !LPadToCallSiteMap[Sym].empty();
+  }
+
+  /// Map the begin label for a call site.
+  void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
+    CallSiteMap[BeginLabel] = Site;
+  }
+
+  /// Get the call site number for a begin label.
+  unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+    assert(hasCallSiteBeginLabel(BeginLabel) &&
+           "Missing call site number for EH_LABEL!");
+    return CallSiteMap.lookup(BeginLabel);
+  }
+
+  /// Return true if the begin label has a call site number associated with it.
+  bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) const {
+    return CallSiteMap.count(BeginLabel);
+  }
+
+  /// Record annotations associated with a particular label.
+  void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD) {
+    CodeViewAnnotations.push_back({Label, MD});
+  }
+
+  ArrayRef<std::pair<MCSymbol *, MDNode *>> getCodeViewAnnotations() const {
+    return CodeViewAnnotations;
+  }
+
+  /// Return a reference to the C++ typeinfo for the current function.
+  const std::vector<const GlobalValue *> &getTypeInfos() const {
+    return TypeInfos;
+  }
+
+  /// Return a reference to the typeids encoding filters used in the current
+  /// function.
+  const std::vector<unsigned> &getFilterIds() const {
+    return FilterIds;
+  }
+
+  /// \}
+
+  /// Collect information used to emit debugging information of a variable.
+  void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+                          unsigned Slot, const DILocation *Loc) {
+    VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
+  }
+
+  VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
+  const VariableDbgInfoMapTy &getVariableDbgInfo() const {
+    return VariableDbgInfos;
+  }
+};
+
+/// \name Exception Handling
+/// \{
+
+/// Extract the exception handling information from the landingpad instruction
+/// and add them to the specified machine module info.
+void addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB);
+
+/// \}
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// machine function as a graph of machine basic blocks... these are
+// the same as the machine basic block iterators, except that the root
+// node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<MachineFunction*> :
+  public GraphTraits<MachineBasicBlock*> {
+  static NodeRef getEntryNode(MachineFunction *F) { return &F->front(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<MachineFunction::iterator>;
+
+  static nodes_iterator nodes_begin(MachineFunction *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end(MachineFunction *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static unsigned       size       (MachineFunction *F) { return F->size(); }
+};
+template <> struct GraphTraits<const MachineFunction*> :
+  public GraphTraits<const MachineBasicBlock*> {
+  static NodeRef getEntryNode(const MachineFunction *F) { return &F->front(); }
+
+  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+  using nodes_iterator = pointer_iterator<MachineFunction::const_iterator>;
+
+  static nodes_iterator nodes_begin(const MachineFunction *F) {
+    return nodes_iterator(F->begin());
+  }
+
+  static nodes_iterator nodes_end  (const MachineFunction *F) {
+    return nodes_iterator(F->end());
+  }
+
+  static unsigned       size       (const MachineFunction *F)  {
+    return F->size();
+  }
+};
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order.  Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineFunction*>> :
+  public GraphTraits<Inverse<MachineBasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
+    return &G.Graph->front();
+  }
+};
+template <> struct GraphTraits<Inverse<const MachineFunction*>> :
+  public GraphTraits<Inverse<const MachineBasicBlock*>> {
+  static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
+    return &G.Graph->front();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEFUNCTION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h b/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h
new file mode 100644
index 0000000..6d978da
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineFunctionPass.h
@@ -0,0 +1,81 @@
+//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineFunctionPass class.  MachineFunctionPasses are
+// just FunctionPasses, except they operate on machine code as part of a code
+// generator.  Because they operate on machine code, not the LLVM
+// representation, MachineFunctionPasses are not allowed to modify the LLVM
+// representation.  Due to this limitation, the MachineFunctionPass class takes
+// care of declaring that no LLVM passes are invalidated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// MachineFunctionPass - This class adapts the FunctionPass interface to
+/// allow convenient creation of passes that operate on the MachineFunction
+/// representation. Instead of overriding runOnFunction, subclasses
+/// override runOnMachineFunction.
+class MachineFunctionPass : public FunctionPass {
+public:
+  bool doInitialization(Module&) override {
+    // Cache the properties info at module-init time so we don't have to
+    // construct them for every function.
+    RequiredProperties = getRequiredProperties();
+    SetProperties = getSetProperties();
+    ClearedProperties = getClearedProperties();
+    return false;
+  }
+protected:
+  explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
+
+  /// runOnMachineFunction - This method must be overloaded to perform the
+  /// desired machine code transformation or analysis.
+  ///
+  virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
+
+  /// getAnalysisUsage - Subclasses that override getAnalysisUsage
+  /// must call this.
+  ///
+  /// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
+  /// the pass does not modify the MachineBasicBlock CFG.
+  ///
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  virtual MachineFunctionProperties getRequiredProperties() const {
+    return MachineFunctionProperties();
+  }
+  virtual MachineFunctionProperties getSetProperties() const {
+    return MachineFunctionProperties();
+  }
+  virtual MachineFunctionProperties getClearedProperties() const {
+    return MachineFunctionProperties();
+  }
+
+private:
+  MachineFunctionProperties RequiredProperties;
+  MachineFunctionProperties SetProperties;
+  MachineFunctionProperties ClearedProperties;
+
+  /// createPrinterPass - Get a machine function printer pass.
+  Pass *createPrinterPass(raw_ostream &O,
+                          const std::string &Banner) const override;
+
+  bool runOnFunction(Function &F) override;
+};
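+
+// A minimal subclass sketch (illustrative; the pass name and its body are
+// hypothetical):
+//
+//   struct CountMachineInstrs : public MachineFunctionPass {
+//     static char ID;
+//     CountMachineInstrs() : MachineFunctionPass(ID) {}
+//     bool runOnMachineFunction(MachineFunction &MF) override {
+//       unsigned N = 0;
+//       for (const MachineBasicBlock &MBB : MF)
+//         N += MBB.size();
+//       (void)N;          // e.g. feed a statistic here
+//       return false;     // nothing was modified
+//     }
+//   };
+//   char CountMachineInstrs::ID = 0;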
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
new file mode 100644
index 0000000..ea94be0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstr.h
@@ -0,0 +1,1407 @@
+//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineInstr class, which is the
+// basic representation for all target dependent machine instructions used by
+// the back end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTR_H
+#define LLVM_CODEGEN_MACHINEINSTR_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class DIExpression;
+class DILocalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineMemOperand;
+class MachineRegisterInfo;
+class ModuleSlotTracker;
+class raw_ostream;
+template <typename T> class SmallVectorImpl;
+class SmallBitVector;
+class StringRef;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+//===----------------------------------------------------------------------===//
+/// Representation of each machine instruction.
+///
+/// This class isn't a POD type, but it must have a trivial destructor. When a
+/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
+/// without having their destructor called.
+///
+class MachineInstr
+    : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
+                                    ilist_sentinel_tracking<true>> {
+public:
+  using mmo_iterator = MachineMemOperand **;
+
+  /// Flags to specify different kinds of comments to output in
+  /// assembly code.  These flags carry semantic information not
+  /// otherwise easily derivable from the IR text.
+  ///
+  enum CommentFlag {
+    ReloadReuse = 0x1,    // higher bits are reserved for target dep comments.
+    NoSchedComment = 0x2,
+    TAsmComments = 0x4    // Target Asm comments should start from this value.
+  };
+
+  enum MIFlag {
+    NoFlags      = 0,
+    FrameSetup   = 1 << 0,              // Instruction is used as a part of
+                                        // function frame setup code.
+    FrameDestroy = 1 << 1,              // Instruction is used as a part of
+                                        // function frame destruction code.
+    BundledPred  = 1 << 2,              // Instruction has bundled predecessors.
+    BundledSucc  = 1 << 3               // Instruction has bundled successors.
+  };
+
+private:
+  const MCInstrDesc *MCID;              // Instruction descriptor.
+  MachineBasicBlock *Parent = nullptr;  // Pointer to the owning basic block.
+
+  // Operands are allocated by an ArrayRecycler.
+  MachineOperand *Operands = nullptr;   // Pointer to the first operand.
+  unsigned NumOperands = 0;             // Number of operands on instruction.
+  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
+  OperandCapacity CapOperands;          // Capacity of the Operands array.
+
+  uint8_t Flags = 0;                    // Various bits of additional
+                                        // information about machine
+                                        // instruction.
+
+  uint8_t AsmPrinterFlags = 0;          // Various bits of information used by
+                                        // the AsmPrinter to emit helpful
+                                        // comments.  This is *not* semantic
+                                        // information.  Do not use this for
+                                        // anything other than to convey comment
+                                        // information to AsmPrinter.
+
+  uint8_t NumMemRefs = 0;               // Information on memory references.
+  // Note that MemRefs == nullptr means 'don't know', not 'no memory access'.
+  // Calling code must treat missing information conservatively.  If the number
+  // of memory operands required to be precise exceeds the maximum value of
+  // NumMemRefs - currently 256 - we remove the operands entirely. Note also
+  // that this is a non-owning reference to a shared copy on write buffer owned
+  // by the MachineFunction and created via MF.allocateMemRefsArray.
+  mmo_iterator MemRefs = nullptr;
+
+  DebugLoc debugLoc;                    // Source line information.
+
+  // Intrusive list support
+  friend struct ilist_traits<MachineInstr>;
+  friend struct ilist_callback_traits<MachineBasicBlock>;
+  void setParent(MachineBasicBlock *P) { Parent = P; }
+
+  /// This constructor creates a copy of the given
+  /// MachineInstr in the given MachineFunction.
+  MachineInstr(MachineFunction &, const MachineInstr &);
+
+  /// This constructor creates a MachineInstr and adds the implicit operands.
+  /// It reserves space for the number of operands specified by the
+  /// MCInstrDesc.  An explicit DebugLoc is supplied.
+  MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
+               bool NoImp = false);
+
+  // MachineInstrs are pool-allocated and owned by MachineFunction.
+  friend class MachineFunction;
+
+public:
+  MachineInstr(const MachineInstr &) = delete;
+  MachineInstr &operator=(const MachineInstr &) = delete;
+  // Use MachineFunction::DeleteMachineInstr() instead.
+  ~MachineInstr() = delete;
+
+  const MachineBasicBlock* getParent() const { return Parent; }
+  MachineBasicBlock* getParent() { return Parent; }
+
+  /// Return the function that contains the basic block that this instruction
+  /// belongs to.
+  ///
+  /// Note: this is undefined behaviour if the instruction does not have a
+  /// parent.
+  const MachineFunction *getMF() const;
+  MachineFunction *getMF() {
+    return const_cast<MachineFunction *>(
+        static_cast<const MachineInstr *>(this)->getMF());
+  }
+
+  /// Return the asm printer flags bitvector.
+  uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
+
+  /// Clear the AsmPrinter bitvector.
+  void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
+
+  /// Return whether an AsmPrinter flag is set.
+  bool getAsmPrinterFlag(CommentFlag Flag) const {
+    return AsmPrinterFlags & Flag;
+  }
+
+  /// Set a flag for the AsmPrinter.
+  void setAsmPrinterFlag(uint8_t Flag) {
+    AsmPrinterFlags |= Flag;
+  }
+
+  /// Clear specific AsmPrinter flags.
+  void clearAsmPrinterFlag(CommentFlag Flag) {
+    AsmPrinterFlags &= ~Flag;
+  }
+
+  /// Return the MI flags bitvector.
+  uint8_t getFlags() const {
+    return Flags;
+  }
+
+  /// Return whether an MI flag is set.
+  bool getFlag(MIFlag Flag) const {
+    return Flags & Flag;
+  }
+
+  /// Set a MI flag.
+  void setFlag(MIFlag Flag) {
+    Flags |= (uint8_t)Flag;
+  }
+
+  void setFlags(unsigned flags) {
+    // Filter out the automatically maintained flags.
+    unsigned Mask = BundledPred | BundledSucc;
+    Flags = (Flags & Mask) | (flags & ~Mask);
+  }
+
+  /// clearFlag - Clear a MI flag.
+  void clearFlag(MIFlag Flag) {
+    Flags &= ~((uint8_t)Flag);
+  }
+
+  /// Return true if MI is in a bundle (but not the first MI in a bundle).
+  ///
+  /// A bundle looks like this before it's finalized:
+  ///   ----------------
+  ///   |      MI      |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  /// In this case, the first MI starts a bundle but is not inside a bundle;
+  /// the next 2 MIs are considered "inside" the bundle.
+  ///
+  /// After a bundle is finalized, it looks like this:
+  ///   ----------------
+  ///   |    Bundle    |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  ///          |
+  ///   ----------------
+  ///   |      MI    * |
+  ///   ----------------
+  /// The first instruction has the special opcode "BUNDLE". It's not "inside"
+  /// a bundle, but the next three MIs are.
+  bool isInsideBundle() const {
+    return getFlag(BundledPred);
+  }
+
+  /// Return true if this instruction is part of a bundle. This is true
+  /// if either itself or its following instruction is marked "InsideBundle".
+  bool isBundled() const {
+    return isBundledWithPred() || isBundledWithSucc();
+  }
+
+  /// Return true if this instruction is part of a bundle, and it is not the
+  /// first instruction in the bundle.
+  bool isBundledWithPred() const { return getFlag(BundledPred); }
+
+  /// Return true if this instruction is part of a bundle, and it is not the
+  /// last instruction in the bundle.
+  bool isBundledWithSucc() const { return getFlag(BundledSucc); }
+
+  /// Bundle this instruction with its predecessor. This can be an unbundled
+  /// instruction, or it can be the first instruction in a bundle.
+  void bundleWithPred();
+
+  /// Bundle this instruction with its successor. This can be an unbundled
+  /// instruction, or it can be the last instruction in a bundle.
+  void bundleWithSucc();
+
+  /// Break bundle above this instruction.
+  void unbundleFromPred();
+
+  /// Break bundle below this instruction.
+  void unbundleFromSucc();
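+
+  // Note (illustrative): most clients create bundles with finalizeBundle()
+  // from MachineInstrBundle.h rather than calling these primitives directly;
+  // the primitives only maintain the BundledPred/BundledSucc flags.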
+
+  /// Returns the debug location of this MachineInstr.
+  const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+  /// Return the debug variable referenced by
+  /// this DBG_VALUE instruction.
+  const DILocalVariable *getDebugVariable() const;
+
+  /// Return the complex address expression referenced by
+  /// this DBG_VALUE instruction.
+  const DIExpression *getDebugExpression() const;
+
+  /// Emit an error referring to the source location of this instruction.
+  /// This should only be used for inline assembly that is somehow
+  /// impossible to compile. Other errors should have been handled much
+  /// earlier.
+  ///
+  /// If this method returns, the caller should try to recover from the error.
+  void emitError(StringRef Msg) const;
+
+  /// Returns the target instruction descriptor of this MachineInstr.
+  const MCInstrDesc &getDesc() const { return *MCID; }
+
+  /// Returns the opcode of this MachineInstr.
+  unsigned getOpcode() const { return MCID->Opcode; }
+
+  /// Access to explicit operands of the instruction.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  const MachineOperand& getOperand(unsigned i) const {
+    assert(i < getNumOperands() && "getOperand() out of range!");
+    return Operands[i];
+  }
+  MachineOperand& getOperand(unsigned i) {
+    assert(i < getNumOperands() && "getOperand() out of range!");
+    return Operands[i];
+  }
+
+  /// Return true if operand \p OpIdx is a subregister index.
+  bool isOperandSubregIdx(unsigned OpIdx) const {
+    assert(getOperand(OpIdx).getType() == MachineOperand::MO_Immediate &&
+           "Expected MO_Immediate operand type.");
+    if (isExtractSubreg() && OpIdx == 2)
+      return true;
+    if (isInsertSubreg() && OpIdx == 3)
+      return true;
+    if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
+      return true;
+    if (isSubregToReg() && OpIdx == 3)
+      return true;
+    return false;
+  }
+
+  /// Returns the number of non-implicit operands.
+  unsigned getNumExplicitOperands() const;
+
+  /// iterator/begin/end - Iterate over all operands of a machine instruction.
+  using mop_iterator = MachineOperand *;
+  using const_mop_iterator = const MachineOperand *;
+
+  mop_iterator operands_begin() { return Operands; }
+  mop_iterator operands_end() { return Operands + NumOperands; }
+
+  const_mop_iterator operands_begin() const { return Operands; }
+  const_mop_iterator operands_end() const { return Operands + NumOperands; }
+
+  iterator_range<mop_iterator> operands() {
+    return make_range(operands_begin(), operands_end());
+  }
+  iterator_range<const_mop_iterator> operands() const {
+    return make_range(operands_begin(), operands_end());
+  }
+  iterator_range<mop_iterator> explicit_operands() {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+  iterator_range<const_mop_iterator> explicit_operands() const {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitOperands());
+  }
+  iterator_range<mop_iterator> implicit_operands() {
+    return make_range(explicit_operands().end(), operands_end());
+  }
+  iterator_range<const_mop_iterator> implicit_operands() const {
+    return make_range(explicit_operands().end(), operands_end());
+  }
+  /// Returns a range over all explicit operands that are register definitions.
+  /// Implicit definition are not included!
+  iterator_range<mop_iterator> defs() {
+    return make_range(operands_begin(),
+                      operands_begin() + getDesc().getNumDefs());
+  }
+  /// \copydoc defs()
+  iterator_range<const_mop_iterator> defs() const {
+    return make_range(operands_begin(),
+                      operands_begin() + getDesc().getNumDefs());
+  }
+  /// Returns a range that includes all operands that are register uses.
+  /// This may include unrelated operands which are not register uses.
+  iterator_range<mop_iterator> uses() {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_end());
+  }
+  /// \copydoc uses()
+  iterator_range<const_mop_iterator> uses() const {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_end());
+  }
+  iterator_range<mop_iterator> explicit_uses() {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_begin() + getNumExplicitOperands() );
+  }
+  iterator_range<const_mop_iterator> explicit_uses() const {
+    return make_range(operands_begin() + getDesc().getNumDefs(),
+                      operands_begin() + getNumExplicitOperands() );
+  }
+
+  /// Returns the number of the operand iterator \p I points to.
+  unsigned getOperandNo(const_mop_iterator I) const {
+    return I - operands_begin();
+  }
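+
+  // Iteration sketch (illustrative): visit register uses through the ranges
+  // above (visitUse is a hypothetical callback):
+  //
+  //   for (const MachineOperand &MO : MI.uses())
+  //     if (MO.isReg() && MO.readsReg())
+  //       visitUse(MO);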
+
+  /// Access to memory operands of the instruction
+  mmo_iterator memoperands_begin() const { return MemRefs; }
+  mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+  /// Return true if we don't have any memory operands which describe the
+  /// memory access done by this instruction.  If this is true, calling code
+  /// must be conservative.
+  bool memoperands_empty() const { return NumMemRefs == 0; }
+
+  iterator_range<mmo_iterator>  memoperands() {
+    return make_range(memoperands_begin(), memoperands_end());
+  }
+  iterator_range<mmo_iterator> memoperands() const {
+    return make_range(memoperands_begin(), memoperands_end());
+  }
+
+  /// Return true if this instruction has exactly one MachineMemOperand.
+  bool hasOneMemOperand() const {
+    return NumMemRefs == 1;
+  }
+
+  /// Return the number of memory operands.
+  unsigned getNumMemOperands() const { return NumMemRefs; }
+
+  /// API for querying MachineInstr properties. They are the same as MCInstrDesc
+  /// queries but they are bundle aware.
+
+  enum QueryType {
+    IgnoreBundle,    // Ignore bundles
+    AnyInBundle,     // Return true if any instruction in bundle has property
+    AllInBundle      // Return true if all instructions in bundle have property
+  };
+
+  /// Return true if the instruction (or in the case of a bundle,
+  /// the instructions inside the bundle) has the specified property.
+  /// The first argument is the property being queried.
+  /// The second argument indicates whether the query should look inside
+  /// instruction bundles.
+  bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+    // Inline the fast path for unbundled or bundle-internal instructions.
+    if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
+      return getDesc().getFlags() & (1ULL << MCFlag);
+
+    // If this is the first instruction in a bundle, take the slow path.
+    return hasPropertyInBundle(1ULL << MCFlag, Type);
+  }
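+
+  // Query sketch (illustrative): the same predicate can be asked of one
+  // instruction or of its whole bundle by picking a QueryType:
+  //
+  //   bool BundleHasCall = MI.isCall(MachineInstr::AnyInBundle);
+  //   bool ThisIsCall = MI.isCall(MachineInstr::IgnoreBundle);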
+
+  /// Return true if this instruction can have a variable number of operands.
+  /// In this case, the variable operands will be after the normal
+  /// operands but before the implicit definitions and uses (if any are
+  /// present).
+  bool isVariadic(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Variadic, Type);
+  }
+
+  /// Set if this instruction has an optional definition, e.g.
+  /// ARM instructions which can set condition code if 's' bit is set.
+  bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::HasOptionalDef, Type);
+  }
+
+  /// Return true if this is a pseudo instruction that doesn't
+  /// correspond to a real machine instruction.
+  bool isPseudo(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Pseudo, Type);
+  }
+
+  bool isReturn(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Return, Type);
+  }
+
+  bool isCall(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Call, Type);
+  }
+
+  /// Returns true if the specified instruction stops control flow
+  /// from executing the instruction immediately following it.  Examples include
+  /// unconditional branches and return instructions.
+  bool isBarrier(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Barrier, Type);
+  }
+
+  /// Returns true if this instruction is part of the terminator for a basic
+  /// block.  Typically this is things like return and branch instructions.
+  ///
+  /// Various passes use this to insert code into the bottom of a basic block,
+  /// but before control flow occurs.
+  bool isTerminator(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Terminator, Type);
+  }
+
+  /// Returns true if this is a conditional, unconditional, or indirect branch.
+  /// Predicates below can be used to discriminate between
+  /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+  /// get more information.
+  bool isBranch(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::Branch, Type);
+  }
+
+  /// Return true if this is an indirect branch, such as a
+  /// branch through a register.
+  bool isIndirectBranch(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::IndirectBranch, Type);
+  }
+
+  /// Return true if this is a branch which may fall
+  /// through to the next instruction or may transfer control flow to some other
+  /// block.  The TargetInstrInfo::AnalyzeBranch method can be used to get more
+  /// information about this branch.
+  bool isConditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
+  }
+
+  /// Return true if this is a branch which always
+  /// transfers control flow to some other block.  The
+  /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+  /// about this branch.
+  bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
+  }
+
+  /// Return true if this instruction has a predicate operand that
+  /// controls execution.  It may be set to 'always', or may be set to other
+  /// values.   There are various methods in TargetInstrInfo that can be used to
+  /// control and modify the predicate in this instruction.
+  bool isPredicable(QueryType Type = AllInBundle) const {
+    // If it's a bundle then all bundled instructions must be predicable for
+    // this to return true.
+    return hasProperty(MCID::Predicable, Type);
+  }
+
+  /// Return true if this instruction is a comparison.
+  bool isCompare(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Compare, Type);
+  }
+
+  /// Return true if this instruction is a move immediate
+  /// (including conditional moves) instruction.
+  bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::MoveImm, Type);
+  }
+
+  /// Return true if this instruction is a bitcast instruction.
+  bool isBitcast(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Bitcast, Type);
+  }
+
+  /// Return true if this instruction is a select instruction.
+  bool isSelect(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Select, Type);
+  }
+
+  /// Return true if this instruction cannot be safely duplicated.
+  /// For example, if the instruction has unique labels attached
+  /// to it, duplicating it would cause multiple definition errors.
+  bool isNotDuplicable(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::NotDuplicable, Type);
+  }
+
+  /// Return true if this instruction is convergent.
+  /// Convergent instructions cannot be made control-dependent on any
+  /// additional values.
+  bool isConvergent(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_IsConvergent)
+        return true;
+    }
+    return hasProperty(MCID::Convergent, Type);
+  }
+
+  /// Returns true if the specified instruction has a delay slot
+  /// which must be filled by the code generator.
+  bool hasDelaySlot(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::DelaySlot, Type);
+  }
+
+  /// Return true for instructions that can be folded as
+  /// memory operands in other instructions. The most common use for this
+  /// is instructions that are simple loads from memory that don't modify
+  /// the loaded value in any way, but it can also be used for instructions
+  /// that can be expressed as constant-pool loads, such as V_SETALLONES
+  /// on x86, to allow them to be folded when it is beneficial.
+  /// This should only be set on instructions that return a value in their
+  /// only virtual register definition.
+  bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::FoldableAsLoad, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic REG_SEQUENCE instructions.
+  /// E.g., on ARM,
+  /// dX VMOVDRR rY, rZ
+  /// is equivalent to
+  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
+  /// overridden accordingly.
+  bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::RegSequence, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic EXTRACT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// rX, rY VMOVRRD dZ
+  /// is equivalent to two EXTRACT_SUBREG:
+  /// rX = EXTRACT_SUBREG dZ, ssub_0
+  /// rY = EXTRACT_SUBREG dZ, ssub_1
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::ExtractSubreg, Type);
+  }
+
+  /// \brief Return true if this instruction behaves
+  /// the same way as the generic INSERT_SUBREG instructions.
+  /// E.g., on ARM,
+  /// dX = VSETLNi32 dY, rZ, Imm
+  /// is equivalent to an INSERT_SUBREG:
+  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
+  ///
+  /// Note that for the optimizers to be able to take advantage of
+  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
+  /// overridden accordingly.
+  bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::InsertSubreg, Type);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Side Effect Analysis
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if this instruction could possibly read memory.
+  /// Instructions with this flag set are not necessarily simple load
+  /// instructions, they may load a value and modify it, for example.
+  bool mayLoad(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_MayLoad)
+        return true;
+    }
+    return hasProperty(MCID::MayLoad, Type);
+  }
+
+  /// Return true if this instruction could possibly modify memory.
+  /// Instructions with this flag set are not necessarily simple store
+  /// instructions, they may store a modified value based on their operands, or
+  /// may not actually modify anything, for example.
+  bool mayStore(QueryType Type = AnyInBundle) const {
+    if (isInlineAsm()) {
+      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+      if (ExtraInfo & InlineAsm::Extra_MayStore)
+        return true;
+    }
+    return hasProperty(MCID::MayStore, Type);
+  }
+
+  /// Return true if this instruction could possibly read or modify memory.
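+  ///
+  /// A conservative legality check might combine this with the side-effect
+  /// query declared below (a sketch; \c MI is assumed to be in scope):
+  /// \code
+  ///   bool TouchesMemory = MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects();
+  /// \endcode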
+  bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
+    return mayLoad(Type) || mayStore(Type);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Flags that indicate whether an instruction can be modified by a method.
+  //===--------------------------------------------------------------------===//
+
+  /// Return true if this may be a 2- or 3-address
+  /// instruction (of the form "X = op Y, Z, ..."), which produces the same
+  /// result if Y and Z are exchanged.  If this flag is set, then the
+  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+  /// instruction.
+  ///
+  /// Note that this flag may be set on instructions that are only commutable
+  /// sometimes.  In these cases, the call to commuteInstruction will fail.
+  /// Also note that some instructions require non-trivial modification to
+  /// commute them.
+  bool isCommutable(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::Commutable, Type);
+  }
+
+  /// Return true if this is a 2-address instruction
+  /// which can be changed into a 3-address instruction if needed.  Doing this
+  /// transformation can be profitable in the register allocator, because it
+  /// means that the instruction can use a 2-address form if possible, but
+  /// degrade into a less efficient form if the source and dest register cannot
+  /// be assigned to the same register.  For example, this allows the x86
+  /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
+  /// is the same speed as the shift but has bigger code size.
+  ///
+  /// If this returns true, then the target must implement the
+  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+  /// is allowed to fail if the transformation isn't valid for this specific
+  /// instruction (e.g. shl reg, 4 on x86).
+  ///
+  bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::ConvertibleTo3Addr, Type);
+  }
+
+  /// Return true if this instruction requires
+  /// custom insertion support when the DAG scheduler is inserting it into a
+  /// machine basic block.  If this is true for the instruction, it basically
+  /// means that it is a pseudo instruction used at SelectionDAG time that is
+  /// expanded out into magic code by the target when MachineInstrs are formed.
+  ///
+  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+  /// is used to insert this into the MachineBasicBlock.
+  bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::UsesCustomInserter, Type);
+  }
+
+  /// Return true if this instruction requires *adjustment*
+  /// after instruction selection by calling a target hook. For example, this
+  /// can be used to fill in ARM 's' optional operand depending on whether
+  /// the conditional flag register is used.
+  bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
+    return hasProperty(MCID::HasPostISelHook, Type);
+  }
+
+  /// Returns true if this instruction is a candidate for remat.
+  /// This flag is deprecated; please don't use it anymore.  If this
+  /// flag is set, the isReallyTriviallyReMaterializable() method is called to
+  /// verify the instruction is really rematable.
+  bool isRematerializable(QueryType Type = AllInBundle) const {
+    // It's only possible to re-mat a bundle if all bundled instructions are
+    // re-materializable.
+    return hasProperty(MCID::Rematerializable, Type);
+  }
+
+  /// Returns true if this instruction has the same cost (or less) than a move
+  /// instruction. This is useful during certain types of optimizations
+  /// (e.g., remat during two-address conversion or machine licm)
+  /// where we would like to remat or hoist the instruction, but not if it costs
+  /// more than moving the instruction into the appropriate register. Note, we
+  /// are not marking copies from and to the same register class with this flag.
+  bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
+    // Only returns true for a bundle if all bundled instructions are cheap.
+    return hasProperty(MCID::CheapAsAMove, Type);
+  }
+
+  /// Returns true if this instruction's source operands
+  /// have special register allocation requirements that are not captured by the
+  /// operand register classes. e.g. ARM::STRD's two source registers must be an
+  /// even / odd pair, ARM::STM registers have to be in ascending order.
+  /// Post-register allocation passes should not attempt to change allocations
+  /// for sources of instructions with this flag.
+  bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
+  }
+
+  /// Returns true if this instruction's def operands
+  /// have special register allocation requirements that are not captured by the
+  /// operand register classes. e.g. ARM::LDRD's two def registers must be an
+  /// even / odd pair, ARM::LDM registers have to be in ascending order.
+  /// Post-register allocation passes should not attempt to change allocations
+  /// for definitions of instructions with this flag.
+  bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
+    return hasProperty(MCID::ExtraDefRegAllocReq, Type);
+  }
+
+  enum MICheckType {
+    CheckDefs,      // Check all operands for equality
+    CheckKillDead,  // Check all operands including kill / dead markers
+    IgnoreDefs,     // Ignore all definitions
+    IgnoreVRegDefs  // Ignore virtual register definitions
+  };
+
+  /// Return true if this instruction is identical to \p Other.
+  /// Two instructions are identical if they have the same opcode and all their
+  /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
+  /// Note that this means liveness related flags (dead, undef, kill) do not
+  /// affect the notion of identical.
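+  ///
+  /// A CSE-style comparison that ignores virtual register defs could be
+  /// written as (a sketch; \c A and \c B are assumed to be in scope):
+  /// \code
+  ///   if (A.isIdenticalTo(B, MachineInstr::IgnoreVRegDefs))
+  ///     ... // B recomputes the same value as A
+  /// \endcode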
+  bool isIdenticalTo(const MachineInstr &Other,
+                     MICheckType Check = CheckDefs) const;
+
+  /// Unlink 'this' from the containing basic block, and return it without
+  /// deleting it.
+  ///
+  /// This function cannot be used on bundled instructions; use
+  /// removeFromBundle() to remove individual instructions from a bundle.
+  MachineInstr *removeFromParent();
+
+  /// Unlink this instruction from its basic block and return it without
+  /// deleting it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle remain bundled.
+  MachineInstr *removeFromBundle();
+
+  /// Unlink 'this' from the containing basic block and delete it.
+  ///
+  /// If this instruction is the header of a bundle, the whole bundle is erased.
+  /// This function cannot be used for instructions inside a bundle; use
+  /// eraseFromBundle() to erase individual bundled instructions.
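+  ///
+  /// When erasing while iterating over a block, advance the iterator before
+  /// the call (an illustrative sketch; \c MBB is assumed to be in scope):
+  /// \code
+  ///   for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
+  ///     MachineInstr &MI = *I++;
+  ///     if (MI.isImplicitDef())
+  ///       MI.eraseFromParent();
+  ///   }
+  /// \endcode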
+  void eraseFromParent();
+
+  /// Unlink 'this' from the containing basic block and delete it.
+  ///
+  /// For all definitions mark their uses in DBG_VALUE nodes
+  /// as undefined. Otherwise like eraseFromParent().
+  void eraseFromParentAndMarkDBGValuesForRemoval();
+
+  /// Unlink 'this' from its basic block and delete it.
+  ///
+  /// If the instruction is part of a bundle, the other instructions in the
+  /// bundle remain bundled.
+  void eraseFromBundle();
+
+  bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
+  bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
+  bool isAnnotationLabel() const {
+    return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
+  }
+
+  /// Returns true if the MachineInstr represents a label.
+  bool isLabel() const {
+    return isEHLabel() || isGCLabel() || isAnnotationLabel();
+  }
+
+  bool isCFIInstruction() const {
+    return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
+  }
+
+  // True if the instruction represents a position in the function.
+  bool isPosition() const { return isLabel() || isCFIInstruction(); }
+
+  bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
+
+  /// A DBG_VALUE is indirect iff the first operand is a register and
+  /// the second operand is an immediate.
+  bool isIndirectDebugValue() const {
+    return isDebugValue()
+      && getOperand(0).isReg()
+      && getOperand(1).isImm();
+  }
+
+  bool isPHI() const {
+    return getOpcode() == TargetOpcode::PHI ||
+           getOpcode() == TargetOpcode::G_PHI;
+  }
+  bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
+  bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
+  bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
+
+  bool isMSInlineAsm() const {
+    return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
+  }
+
+  bool isStackAligningInlineAsm() const;
+  InlineAsm::AsmDialect getInlineAsmDialect() const;
+
+  bool isInsertSubreg() const {
+    return getOpcode() == TargetOpcode::INSERT_SUBREG;
+  }
+
+  bool isSubregToReg() const {
+    return getOpcode() == TargetOpcode::SUBREG_TO_REG;
+  }
+
+  bool isRegSequence() const {
+    return getOpcode() == TargetOpcode::REG_SEQUENCE;
+  }
+
+  bool isBundle() const {
+    return getOpcode() == TargetOpcode::BUNDLE;
+  }
+
+  bool isCopy() const {
+    return getOpcode() == TargetOpcode::COPY;
+  }
+
+  bool isFullCopy() const {
+    return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
+  }
+
+  bool isExtractSubreg() const {
+    return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
+  }
+
+  /// Return true if the instruction behaves like a copy.
+  /// This does not include native copy instructions.
+  bool isCopyLike() const {
+    return isCopy() || isSubregToReg();
+  }
+
+  /// Return true if the instruction is an identity copy.
+  bool isIdentityCopy() const {
+    return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+      getOperand(0).getSubReg() == getOperand(1).getSubReg();
+  }
+
+  /// Return true if this instruction doesn't produce any output in the form of
+  /// executable instructions.
+  bool isMetaInstruction() const {
+    switch (getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::IMPLICIT_DEF:
+    case TargetOpcode::KILL:
+    case TargetOpcode::CFI_INSTRUCTION:
+    case TargetOpcode::EH_LABEL:
+    case TargetOpcode::GC_LABEL:
+    case TargetOpcode::DBG_VALUE:
+    case TargetOpcode::LIFETIME_START:
+    case TargetOpcode::LIFETIME_END:
+      return true;
+    }
+  }
+
+  /// Return true if this is a transient instruction that is either very likely
+  /// to be eliminated during register allocation (such as copy-like
+  /// instructions), or if this instruction doesn't have an execution-time cost.
+  bool isTransient() const {
+    switch (getOpcode()) {
+    default:
+      return isMetaInstruction();
+    // Copy-like instructions are usually eliminated during register allocation.
+    case TargetOpcode::PHI:
+    case TargetOpcode::G_PHI:
+    case TargetOpcode::COPY:
+    case TargetOpcode::INSERT_SUBREG:
+    case TargetOpcode::SUBREG_TO_REG:
+    case TargetOpcode::REG_SEQUENCE:
+      return true;
+    }
+  }
+
+  /// Return the number of instructions inside the MI bundle, excluding the
+  /// bundle header.
+  ///
+  /// This is the number of instructions that MachineBasicBlock::iterator
+  /// skips, 0 for unbundled instructions.
+  unsigned getBundleSize() const;
+
+  /// Return true if the MachineInstr reads the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks if there
+  /// is a read of a super-register.
+  /// This does not count partial redefines of virtual registers as reads:
+  ///   %reg1024:6 = OP.
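+  ///
+  /// A typical query looks like this sketch (pass \c TRI to also catch
+  /// super-register reads; names are illustrative):
+  /// \code
+  ///   if (MI.readsRegister(Reg, TRI) && !MI.definesRegister(Reg, TRI))
+  ///     ... // Reg is only used, never (re)defined, by MI
+  /// \endcode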
+  bool readsRegister(unsigned Reg,
+                     const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr reads the specified virtual register.
+  /// Take into account that a partial define is a
+  /// read-modify-write operation.
+  bool readsVirtualRegister(unsigned Reg) const {
+    return readsWritesVirtualRegister(Reg).first;
+  }
+
+  /// Return a pair of bools (reads, writes) indicating if this instruction
+  /// reads or writes Reg. This also considers partial defines.
+  /// If Ops is not null, all operand indices for Reg are added.
+  std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
+                                SmallVectorImpl<unsigned> *Ops = nullptr) const;
+
+  /// Return true if the MachineInstr kills the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks if there is
+  /// a kill of a super-register.
+  bool killsRegister(unsigned Reg,
+                     const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr fully defines the specified register.
+  /// If TargetRegisterInfo is passed, then it also checks
+  /// if there is a def of a super-register.
+  /// NOTE: It's ignoring subreg indices on virtual registers.
+  bool definesRegister(unsigned Reg,
+                       const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
+  }
+
+  /// Return true if the MachineInstr modifies (fully define or partially
+  /// define) the specified register.
+  /// NOTE: It's ignoring subreg indices on virtual registers.
+  bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
+    return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
+  }
+
+  /// Returns true if the register is dead in this machine instruction.
+  /// If TargetRegisterInfo is passed, then it also checks
+  /// if there is a dead def of a super-register.
+  bool registerDefIsDead(unsigned Reg,
+                         const TargetRegisterInfo *TRI = nullptr) const {
+    return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
+  }
+
+  /// Returns true if the MachineInstr has an implicit-use operand of exactly
+  /// the given register (not considering sub/super-registers).
+  bool hasRegisterImplicitUseOperand(unsigned Reg) const;
+
+  /// Returns the operand index that is a use of the specified register or -1
+  /// if it is not found. It further tightens the search criteria to a use
+  /// that kills the register if isKill is true.
+  int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
+                                const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Wrapper for findRegisterUseOperandIdx; it returns
+  /// a pointer to the MachineOperand rather than an index.
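+  ///
+  /// For example, clearing a kill flag in place (a sketch; \c MI and \c Reg
+  /// are assumed to be in scope):
+  /// \code
+  ///   if (MachineOperand *MO = MI.findRegisterUseOperand(Reg, /*isKill=*/true))
+  ///     MO->setIsKill(false);
+  /// \endcode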
+  MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
+                                      const TargetRegisterInfo *TRI = nullptr) {
+    int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
+    return (Idx == -1) ? nullptr : &getOperand(Idx);
+  }
+
+  const MachineOperand *findRegisterUseOperand(
+    unsigned Reg, bool isKill = false,
+    const TargetRegisterInfo *TRI = nullptr) const {
+    return const_cast<MachineInstr *>(this)->
+      findRegisterUseOperand(Reg, isKill, TRI);
+  }
+
+  /// Returns the operand index that is a def of the specified register or
+  /// -1 if it is not found. If isDead is true, defs that are not dead are
+  /// skipped. If Overlap is true, then it also looks for defs that merely
+  /// overlap the specified register. If TargetRegisterInfo is non-null,
+  /// then it also checks if there is a def of a super-register.
+  /// This may also return a register mask operand when Overlap is true.
+  int findRegisterDefOperandIdx(unsigned Reg,
+                                bool isDead = false, bool Overlap = false,
+                                const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Wrapper for findRegisterDefOperandIdx; it returns
+  /// a pointer to the MachineOperand rather than an index.
+  MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
+                                      const TargetRegisterInfo *TRI = nullptr) {
+    int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
+    return (Idx == -1) ? nullptr : &getOperand(Idx);
+  }
+
+  /// Find the index of the first operand in the
+  /// operand list that is used to represent the predicate. It returns -1 if
+  /// none is found.
+  int findFirstPredOperandIdx() const;
+
+  /// Find the index of the flag word operand that
+  /// corresponds to operand OpIdx on an inline asm instruction.  Returns -1 if
+  /// getOperand(OpIdx) does not belong to an inline asm operand group.
+  ///
+  /// If GroupNo is not NULL, it will receive the number of the operand group
+  /// containing OpIdx.
+  ///
+  /// The flag operand is an immediate that can be decoded with methods like
+  /// InlineAsm::hasRegClassConstraint().
+  int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
+
+  /// Compute the static register class constraint for operand OpIdx.
+  /// For normal instructions, this is derived from the MCInstrDesc.
+  /// For inline assembly it is derived from the flag words.
+  ///
+  /// Returns NULL if the static register class constraint cannot be
+  /// determined.
+  const TargetRegisterClass*
+  getRegClassConstraint(unsigned OpIdx,
+                        const TargetInstrInfo *TII,
+                        const TargetRegisterInfo *TRI) const;
+
+  /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
+  /// the given \p CurRC.
+  /// If \p ExploreBundle is set and MI is part of a bundle, all the
+  /// instructions inside the bundle will be taken into account. In other words,
+  /// this method accumulates all the constraints of the operand of this MI and
+  /// the related bundle if MI is a bundle or inside a bundle.
+  ///
+  /// Returns the register class that satisfies both \p CurRC and the
+  /// constraints set by MI. Returns NULL if such a register class does not
+  /// exist.
+  ///
+  /// \pre CurRC must not be NULL.
+  const TargetRegisterClass *getRegClassConstraintEffectForVReg(
+      unsigned Reg, const TargetRegisterClass *CurRC,
+      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
+      bool ExploreBundle = false) const;
+
+  /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
+  /// to the given \p CurRC.
+  ///
+  /// Returns the register class that satisfies both \p CurRC and the
+  /// constraints set by operand \p OpIdx of this MI. Returns NULL if such a
+  /// register class does not exist.
+  ///
+  /// \pre CurRC must not be NULL.
+  /// \pre The operand at \p OpIdx must be a register.
+  const TargetRegisterClass *
+  getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
+                              const TargetInstrInfo *TII,
+                              const TargetRegisterInfo *TRI) const;
+
+  /// Add a tie between the register operands at DefIdx and UseIdx.
+  /// The tie will cause the register allocator to ensure that the two
+  /// operands are assigned the same physical register.
+  ///
+  /// Tied operands are managed automatically for explicit operands in the
+  /// MCInstrDesc. This method is for exceptional cases like inline asm.
+  void tieOperands(unsigned DefIdx, unsigned UseIdx);
+
+  /// Given the index of a tied register operand, find the
+  /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
+  /// index of the tied operand which must exist.
+  unsigned findTiedOperandIdx(unsigned OpIdx) const;
+
+  /// Given the index of a register def operand,
+  /// check if the register def is tied to a source operand, due to either
+  /// two-address elimination or inline assembly constraints. Returns the
+  /// first tied use operand index by reference if UseOpIdx is not null.
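+  ///
+  /// For example (a sketch; \c MI and \c DefIdx are assumed to be in scope):
+  /// \code
+  ///   unsigned UseIdx;
+  ///   if (MI.isRegTiedToUseOperand(DefIdx, &UseIdx))
+  ///     ... // getOperand(UseIdx) must receive the same register as the def
+  /// \endcode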
+  bool isRegTiedToUseOperand(unsigned DefOpIdx,
+                             unsigned *UseOpIdx = nullptr) const {
+    const MachineOperand &MO = getOperand(DefOpIdx);
+    if (!MO.isReg() || !MO.isDef() || !MO.isTied())
+      return false;
+    if (UseOpIdx)
+      *UseOpIdx = findTiedOperandIdx(DefOpIdx);
+    return true;
+  }
+
+  /// Return true if the use operand of the specified index is tied to a def
+  /// operand. It also returns the def operand index by reference if DefOpIdx
+  /// is not null.
+  bool isRegTiedToDefOperand(unsigned UseOpIdx,
+                             unsigned *DefOpIdx = nullptr) const {
+    const MachineOperand &MO = getOperand(UseOpIdx);
+    if (!MO.isReg() || !MO.isUse() || !MO.isTied())
+      return false;
+    if (DefOpIdx)
+      *DefOpIdx = findTiedOperandIdx(UseOpIdx);
+    return true;
+  }
+
+  /// Clears kill flags on all operands.
+  void clearKillInfo();
+
+  /// Replace all occurrences of FromReg with ToReg:SubIdx,
+  /// properly composing subreg indices where necessary.
+  void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+                          const TargetRegisterInfo &RegInfo);
+
+  /// We have determined MI kills a register. Look for the
+  /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
+  /// add an implicit operand if it's not found. Returns true if the operand
+  /// exists / is added.
+  bool addRegisterKilled(unsigned IncomingReg,
+                         const TargetRegisterInfo *RegInfo,
+                         bool AddIfNotFound = false);
+
+  /// Clear all kill flags affecting Reg.  If RegInfo is provided, this includes
+  /// all aliasing registers.
+  void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
+
+  /// We have determined MI defined a register without a use.
+  /// Look for the operand that defines it and mark it as IsDead. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
+  /// true if the operand exists / is added.
+  bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
+                       bool AddIfNotFound = false);
+
+  /// Clear all dead flags on operands defining register @p Reg.
+  void clearRegisterDeads(unsigned Reg);
+
+  /// Mark all subregister defs of register @p Reg with the undef flag.
+  /// This function is used when we have determined that there is a subregister
+  /// def in an otherwise undefined super register.
+  void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
+
+  /// We have determined MI defines a register. Make sure there is an operand
+  /// defining Reg.
+  void addRegisterDefined(unsigned Reg,
+                          const TargetRegisterInfo *RegInfo = nullptr);
+
+  /// Mark every physreg used by this instruction as
+  /// dead except those in the UsedRegs list.
+  ///
+  /// On instructions with register mask operands, also add implicit-def
+  /// operands for all registers in UsedRegs.
+  void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
+                             const TargetRegisterInfo &TRI);
+
+  /// Return true if it is safe to move this instruction. If
+  /// SawStore is set to true, it means that there is a store (or call) between
+  /// the instruction's location and its intended destination.
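+  ///
+  /// Typical usage (a sketch; \c MI and \c AA are assumed to be in scope):
+  /// \code
+  ///   bool SawStore = false;
+  ///   if (MI.isSafeToMove(AA, SawStore))
+  ///     ... // MI may be hoisted or sunk
+  /// \endcode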
+  bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
+
+  /// Returns true if this instruction's memory access aliases the memory
+  /// access of Other.
+  ///
+  /// Assumes any physical registers used to compute addresses
+  /// have the same value for both instructions.  Returns false if neither
+  /// instruction writes to memory.
+  ///
+  /// @param AA Optional alias analysis, used to compare memory operands.
+  /// @param Other MachineInstr to check aliasing against.
+  /// @param UseTBAA Whether to pass TBAA information to alias analysis.
+  bool mayAlias(AliasAnalysis *AA, MachineInstr &Other, bool UseTBAA);
+
+  /// Return true if this instruction may have an ordered
+  /// or volatile memory reference, or if the information describing the memory
+  /// reference is not available. Return false if it is known to have no
+  /// ordered or volatile memory references.
+  bool hasOrderedMemoryRef() const;
+
+  /// Return true if this load instruction never traps and points to a memory
+  /// location whose value doesn't change during the execution of this function.
+  ///
+  /// Examples include loading a value from the constant pool or from the
+  /// argument area of a function (if it does not change).  If the instruction
+  /// does multiple loads, this returns true only if all of the loads are
+  /// dereferenceable and invariant.
+  bool isDereferenceableInvariantLoad(AliasAnalysis *AA) const;
+
+  /// If the specified instruction is a PHI that always merges together the
+  /// same virtual register, return the register, otherwise return 0.
+  unsigned isConstantValuePHI() const;
+
+  /// Return true if this instruction has side effects that are not modeled
+  /// by mayLoad / mayStore, etc.
+  /// For all instructions, the property is encoded in MCInstrDesc::Flags
+  /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
+  /// INLINEASM instruction, in which case the side effect property is encoded
+  /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
+  ///
+  bool hasUnmodeledSideEffects() const;
+
+  /// Returns true if it is illegal to fold a load across this instruction.
+  bool isLoadFoldBarrier() const;
+
+  /// Return true if all the defs of this instruction are dead.
+  bool allDefsAreDead() const;
+
+  /// Copy implicit register operands from specified
+  /// instruction to this instruction.
+  void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);
+
+  /// Debugging support
+  /// @{
+  /// Determine the generic type to be printed (if needed) on uses and defs.
+  LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
+                     const MachineRegisterInfo &MRI) const;
+
+  /// Return true when an instruction has a tied register that can't be
+  /// determined by the instruction's descriptor. This is useful for MIR
+  /// printing, to determine whether we need to print the ties or not.
+  bool hasComplexRegisterTies() const;
+
+  /// Print this MI to \p OS.
+  /// Don't print information that can be inferred from other instructions if
+  /// \p IsStandalone is false. It is usually true when only a fragment of the
+  /// function is printed.
+  /// If \p SkipOpers is true, only print the defs and the opcode.
+  /// Otherwise the operands are printed as well, and unless \p SkipDebugLoc is
+  /// true, the debug loc is also printed, with a terminating newline.
+  /// \p TII is used to print the opcode name.  If it's not present, but the
+  /// MI is in a function, the opcode will be printed using the function's TII.
+  void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
+             bool SkipDebugLoc = false,
+             const TargetInstrInfo *TII = nullptr) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
+             bool SkipOpers = false, bool SkipDebugLoc = false,
+             const TargetInstrInfo *TII = nullptr) const;
+  void dump() const;
+  /// @}
+
+  //===--------------------------------------------------------------------===//
+  // Accessors used to build up machine instructions.
+
+  /// Add the specified operand to the instruction.  If it is an implicit
+  /// operand, it is added to the end of the operand list.  If it is an
+  /// explicit operand it is added at the end of the explicit operand list
+  /// (before the first implicit operand).
+  ///
+  /// MF must be the machine function that was used to allocate this
+  /// instruction.
+  ///
+  /// MachineInstrBuilder provides a more convenient interface for creating
+  /// instructions and adding operands.
+  void addOperand(MachineFunction &MF, const MachineOperand &Op);
+
+  /// Add an operand without providing an MF reference. This only works for
+  /// instructions that are inserted in a basic block.
+  ///
+  /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
+  /// preferred.
+  void addOperand(const MachineOperand &Op);
+
+  /// Replace the instruction descriptor (thus opcode) of
+  /// the current instruction with a new one.
+  void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
+
+  /// Replace the current source information with new information.
+  /// Avoid using this; the constructor argument is preferable.
+  void setDebugLoc(DebugLoc dl) {
+    debugLoc = std::move(dl);
+    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+  }
+
+  /// Erase an operand from an instruction, leaving it with one
+  /// fewer operand than it started with.
+  void RemoveOperand(unsigned i);
+
+  /// Add a MachineMemOperand to the machine instruction.
+  /// This function should be used only occasionally. The setMemRefs function
+  /// is the primary method for setting up a MachineInstr's MemRefs list.
+  void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
+
+  /// Assign this MachineInstr's memory reference descriptor list.
+  /// This does not transfer ownership.
+  void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+    setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
+  }
+
+  /// Assign this MachineInstr's memory reference descriptor list.  First
+  /// element in the pair is the begin iterator/pointer to the array; the
+  /// second is the number of MemoryOperands.  This does not transfer ownership
+  /// of the underlying memory.
+  void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
+    MemRefs = NewMemRefs.first;
+    NumMemRefs = uint8_t(NewMemRefs.second);
+    assert(NumMemRefs == NewMemRefs.second &&
+           "Too many memrefs - must drop memory operands");
+  }
+
+  /// Return a set of memrefs (begin iterator, size) which conservatively
+  /// describe the memory behavior of both MachineInstrs.  This is appropriate
+  /// for use when merging two MachineInstrs into one. This routine does not
+  /// modify the memrefs of this MachineInstr.
+  std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
+
+  /// Return the MIFlags which represent both MachineInstrs. This
+  /// should be used when merging two MachineInstrs into one. This routine does
+  /// not modify the MIFlags of this MachineInstr.
+  uint8_t mergeFlagsWith(const MachineInstr& Other) const;
+
+  /// Clear this MachineInstr's memory reference descriptor list.  This resets
+  /// the memrefs to their most conservative state.  This should be used only
+  /// as a last resort since it greatly pessimizes our knowledge of the memory
+  /// access performed by the instruction.
+  void dropMemRefs() {
+    MemRefs = nullptr;
+    NumMemRefs = 0;
+  }
+
+  /// Break any tie involving OpIdx.
+  void untieRegOperand(unsigned OpIdx) {
+    MachineOperand &MO = getOperand(OpIdx);
+    if (MO.isReg() && MO.isTied()) {
+      getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
+      MO.TiedTo = 0;
+    }
+  }
+
+  /// Add all implicit def and use operands to this instruction.
+  void addImplicitDefUseOperands(MachineFunction &MF);
+
+private:
+  /// If this instruction is embedded into a MachineFunction, return the
+  /// MachineRegisterInfo object for the current function, otherwise
+  /// return null.
+  MachineRegisterInfo *getRegInfo();
+
+  /// Unlink all of the register operands in this instruction from their
+  /// respective use lists.  This requires that the operands already be on their
+  /// use lists.
+  void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
+
+  /// Add all of the register operands in this instruction to their
+  /// respective use lists.  This requires that the operands not be on their
+  /// use lists yet.
+  void AddRegOperandsToUseLists(MachineRegisterInfo&);
+
+  /// Slow path for hasProperty when we're dealing with a bundle.
+  bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
+
+  /// \brief Implements the logic of getRegClassConstraintEffectForVReg for
+  /// this MI and the given operand index \p OpIdx.
+  /// If the related operand does not constrain Reg, this returns CurRC.
+  const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
+      unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
+      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
+};
+
+/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
+/// instruction rather than by pointer value.
+/// The hashing and equality testing functions ignore definitions so this is
+/// useful for CSE, etc.
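+///
+/// A typical use is a map keyed by instruction value rather than by pointer
+/// (a sketch; the map name is illustrative):
+/// \code
+///   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> CSEMap;
+/// \endcode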
+struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
+  static inline MachineInstr *getEmptyKey() {
+    return nullptr;
+  }
+
+  static inline MachineInstr *getTombstoneKey() {
+    return reinterpret_cast<MachineInstr*>(-1);
+  }
+
+  static unsigned getHashValue(const MachineInstr* const &MI);
+
+  static bool isEqual(const MachineInstr* const &LHS,
+                      const MachineInstr* const &RHS) {
+    if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
+        LHS == getEmptyKey() || LHS == getTombstoneKey())
+      return LHS == RHS;
+    return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
+  MI.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
new file mode 100644
index 0000000..2df89b1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -0,0 +1,560 @@
+//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes a function named BuildMI, which is useful for dramatically
+// simplifying how MachineInstrs are created.  It allows use of code like this:
+//
+//   M = BuildMI(MBB, MI, DL, TII.get(X86::ADD8rr), Dst)
+//           .addReg(argVal1)
+//           .addReg(argVal2);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+class MCInstrDesc;
+class MDNode;
+
+namespace RegState {
+
+  enum {
+    Define         = 0x2,
+    Implicit       = 0x4,
+    Kill           = 0x8,
+    Dead           = 0x10,
+    Undef          = 0x20,
+    EarlyClobber   = 0x40,
+    Debug          = 0x80,
+    InternalRead   = 0x100,
+    Renamable      = 0x200,
+    DefineNoRead   = Define | Undef,
+    ImplicitDefine = Implicit | Define,
+    ImplicitKill   = Implicit | Kill
+  };
+
+} // end namespace RegState
+
+class MachineInstrBuilder {
+  MachineFunction *MF = nullptr;
+  MachineInstr *MI = nullptr;
+
+public:
+  MachineInstrBuilder() = default;
+
+  /// Create a MachineInstrBuilder for manipulating an existing instruction.
+  /// F must be the machine function that was used to allocate I.
+  MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
+  MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
+      : MF(&F), MI(&*I) {}
+
+  /// Allow automatic conversion to the machine instruction we are working on.
+  operator MachineInstr*() const { return MI; }
+  MachineInstr *operator->() const { return MI; }
+  operator MachineBasicBlock::iterator() const { return MI; }
+
+  /// If conversion operators fail, use this method to get the MachineInstr
+  /// explicitly.
+  MachineInstr *getInstr() const { return MI; }
+
+  /// Add a new virtual register operand.
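+  ///
+  /// The \p flags argument takes values from the RegState enum, e.g.
+  /// (a sketch; \c MIB and \c SrcReg are illustrative):
+  /// \code
+  ///   MIB.addReg(SrcReg, RegState::Kill);
+  /// \endcode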
+  const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
+                                    unsigned SubReg = 0) const {
+    assert((flags & 0x1) == 0 &&
+           "Passing in 'true' to addReg is forbidden! Use enums instead.");
+    MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
+                                               flags & RegState::Define,
+                                               flags & RegState::Implicit,
+                                               flags & RegState::Kill,
+                                               flags & RegState::Dead,
+                                               flags & RegState::Undef,
+                                               flags & RegState::EarlyClobber,
+                                               SubReg,
+                                               flags & RegState::Debug,
+                                               flags & RegState::InternalRead,
+                                               flags & RegState::Renamable));
+    return *this;
+  }
+
+  /// Add a virtual register definition operand.
+  const MachineInstrBuilder &addDef(unsigned RegNo, unsigned Flags = 0,
+                                    unsigned SubReg = 0) const {
+    return addReg(RegNo, Flags | RegState::Define, SubReg);
+  }
+
+  /// Add a virtual register use operand. It is an error for Flags to contain
+  /// `RegState::Define` when calling this function.
+  const MachineInstrBuilder &addUse(unsigned RegNo, unsigned Flags = 0,
+                                    unsigned SubReg = 0) const {
+    assert(!(Flags & RegState::Define) &&
+           "Misleading addUse defines register, use addReg instead.");
+    return addReg(RegNo, Flags, SubReg);
+  }
+
+  /// Add a new immediate operand.
+  const MachineInstrBuilder &addImm(int64_t Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
+    MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
+                                    unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addFrameIndex(int Idx) const {
+    MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
+                                                  int Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
+                                                          TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
+                                              int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addExternalSymbol(const char *FnName,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
+                                             int64_t Offset = 0,
+                                          unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
+    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
+    MI->addMemOperand(*MF, MMO);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
+                                        MachineInstr::mmo_iterator e) const {
+    MI->setMemRefs(b, e);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
+                                        unsigned> MemOperandsRef) const {
+    MI->setMemRefs(MemOperandsRef);
+    return *this;
+  }
+
+  const MachineInstrBuilder &add(const MachineOperand &MO) const {
+    MI->addOperand(*MF, MO);
+    return *this;
+  }
+
+  const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
+    for (const MachineOperand &MO : MOs) {
+      MI->addOperand(*MF, MO);
+    }
+    return *this;
+  }
+
+  const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
+    MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
+    assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
+                               : true) &&
+           "first MDNode argument of a DBG_VALUE not a variable");
+    return *this;
+  }
+
+  const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
+    MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
+    MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
+    MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
+    return *this;
+  }
+
+  const MachineInstrBuilder &addSym(MCSymbol *Sym,
+                                    unsigned char TargetFlags = 0) const {
+    MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
+    MI->setFlags(Flags);
+    return *this;
+  }
+
+  const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
+    MI->setFlag(Flag);
+    return *this;
+  }
+
+  // Add a displacement from an existing MachineOperand with an added offset.
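+  // For example, cloning an address operand with a +4 offset (a sketch; the
+  // operand index and MIB are illustrative):
+  //   MIB.addDisp(Orig.getOperand(3), 4);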
+  const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
+                                     unsigned char TargetFlags = 0) const {
+    // If caller specifies new TargetFlags then use it, otherwise the
+    // default behavior is to copy the target flags from the existing
+    // MachineOperand. This means if the caller wants to clear the
+    // target flags it needs to do so explicitly.
+    if (0 == TargetFlags)
+      TargetFlags = Disp.getTargetFlags();
+
+    switch (Disp.getType()) {
+      default:
+        llvm_unreachable("Unhandled operand type in addDisp()");
+      case MachineOperand::MO_Immediate:
+        return addImm(Disp.getImm() + off);
+      case MachineOperand::MO_ConstantPoolIndex:
+        return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
+                                    TargetFlags);
+      case MachineOperand::MO_GlobalAddress:
+        return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
+                                TargetFlags);
+    }
+  }
+
+  /// Copy all the implicit operands from OtherMI onto this one.
+  const MachineInstrBuilder &
+  copyImplicitOps(const MachineInstr &OtherMI) const {
+    MI->copyImplicitOps(*MF, OtherMI);
+    return *this;
+  }
+
+  bool constrainAllUses(const TargetInstrInfo &TII,
+                        const TargetRegisterInfo &TRI,
+                        const RegisterBankInfo &RBI) const {
+    return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
+  }
+};
+
+/// Builder interface. Specify how to create the initial instruction itself.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
+}
+
+/// This version of the builder sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID, unsigned DestReg) {
+  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
+           .addReg(DestReg, RegState::Define);
+}
+
+/// This version of the builder inserts the newly-built instruction before
+/// the given position in the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::iterator I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+/// This version of the builder inserts the newly-built instruction before
+/// the given position in the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+///
+/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
+/// added to the same bundle.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::instr_iterator I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  // Calling the overload for instr_iterator is always correct.  However, the
+  // definition is not available in headers, so inline the check.
+  if (I.isInsideBundle())
+    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID, DestReg);
+  return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID, DestReg);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
+                                   const DebugLoc &DL, const MCInstrDesc &MCID,
+                                   unsigned DestReg) {
+  return BuildMI(BB, *I, DL, MCID, DestReg);
+}
+
+/// This version of the builder inserts the newly-built instruction before the
+/// given position in the given MachineBasicBlock, and does NOT take a
+/// destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::iterator I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                                   MachineBasicBlock::instr_iterator I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  MachineFunction &MF = *BB.getParent();
+  MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+  BB.insert(I, MI);
+  return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  // Calling the overload for instr_iterator is always correct.  However, the
+  // definition is not available in headers, so inline the check.
+  if (I.isInsideBundle())
+    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), DL, MCID);
+  return BuildMI(BB, MachineBasicBlock::iterator(I), DL, MCID);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
+                                   const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return BuildMI(BB, *I, DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the end
+/// of the given MachineBasicBlock, and does NOT take a destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID) {
+  return BuildMI(*BB, BB->end(), DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the
+/// end of the given MachineBasicBlock, and sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB, const DebugLoc &DL,
+                                   const MCInstrDesc &MCID, unsigned DestReg) {
+  return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
+}
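+
+/// A minimal usage sketch (TII, DL, MBB, DstReg, SrcReg, and SrcIsKill are
+/// assumed to come from the enclosing pass; illustrative, not a prescribed
+/// pattern):
+///
+///   BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), DstReg)
+///       .addReg(SrcReg, getKillRegState(SrcIsKill));
+///
+/// This appends a COPY at the end of MBB that defines DstReg and reads SrcReg,
+/// setting the kill flag only when SrcIsKill is true.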
+
+/// This version of the builder builds a DBG_VALUE intrinsic
+/// for either a value in a register or a register-indirect
+/// address.  The convention is that a DBG_VALUE is indirect iff the
+/// second operand is an immediate.
+MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
+                            const MCInstrDesc &MCID, bool IsIndirect,
+                            unsigned Reg, const MDNode *Variable,
+                            const MDNode *Expr);
+
+/// This version of the builder builds a DBG_VALUE intrinsic
+/// for either a value in a register or a register-indirect
+/// address and inserts it at position I.
+MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+                            MachineBasicBlock::iterator I, const DebugLoc &DL,
+                            const MCInstrDesc &MCID, bool IsIndirect,
+                            unsigned Reg, const MDNode *Variable,
+                            const MDNode *Expr);
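+
+/// A brief sketch of the indirection convention above (names illustrative):
+///
+///   // Direct: the variable's value lives in Reg.
+///   BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE),
+///           /*IsIndirect=*/false, Reg, Variable, Expr);
+///   // Indirect: Reg holds the address of the variable's storage.
+///   BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE),
+///           /*IsIndirect=*/true, Reg, Variable, Expr);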
+
+/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
+MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
+                                    MachineBasicBlock::iterator I,
+                                    const MachineInstr &Orig, int FrameIndex);
+
+/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
+/// modifying an instruction in place while iterating over a basic block.
+void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex);
+
+inline unsigned getDefRegState(bool B) {
+  return B ? RegState::Define : 0;
+}
+inline unsigned getImplRegState(bool B) {
+  return B ? RegState::Implicit : 0;
+}
+inline unsigned getKillRegState(bool B) {
+  return B ? RegState::Kill : 0;
+}
+inline unsigned getDeadRegState(bool B) {
+  return B ? RegState::Dead : 0;
+}
+inline unsigned getUndefRegState(bool B) {
+  return B ? RegState::Undef : 0;
+}
+inline unsigned getInternalReadRegState(bool B) {
+  return B ? RegState::InternalRead : 0;
+}
+inline unsigned getDebugRegState(bool B) {
+  return B ? RegState::Debug : 0;
+}
+inline unsigned getRenamableRegState(bool B) {
+  return B ? RegState::Renamable : 0;
+}
+
+/// Get all register state flags from machine operand \p RegOp.
+inline unsigned getRegState(const MachineOperand &RegOp) {
+  assert(RegOp.isReg() && "Not a register operand");
+  return getDefRegState(RegOp.isDef())                    |
+         getImplRegState(RegOp.isImplicit())              |
+         getKillRegState(RegOp.isKill())                  |
+         getDeadRegState(RegOp.isDead())                  |
+         getUndefRegState(RegOp.isUndef())                |
+         getInternalReadRegState(RegOp.isInternalRead())  |
+         getDebugRegState(RegOp.isDebug())                |
+         getRenamableRegState(
+             TargetRegisterInfo::isPhysicalRegister(RegOp.getReg()) &&
+             RegOp.isRenamable());
+}
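+
+/// A small sketch of how these helpers compose (MIB, SrcReg, IsKill, and
+/// IsUndef are assumed locals in the caller):
+///
+///   MIB.addReg(SrcReg, getKillRegState(IsKill) | getUndefRegState(IsUndef));
+///
+/// getRegState(MO) is the usual way to carry all flags over when rewriting an
+/// existing register operand onto a new instruction.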
+
+/// Helper class for constructing bundles of MachineInstrs.
+///
+/// MIBundleBuilder can create a bundle from scratch by inserting new
+/// MachineInstrs one at a time, or it can create a bundle from a sequence of
+/// existing MachineInstrs in a basic block.
+class MIBundleBuilder {
+  MachineBasicBlock &MBB;
+  MachineBasicBlock::instr_iterator Begin;
+  MachineBasicBlock::instr_iterator End;
+
+public:
+  /// Create an MIBundleBuilder that inserts instructions into a new bundle in
+  /// BB above the bundle or instruction at Pos.
+  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
+      : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
+
+  /// Create a bundle from the sequence of instructions between B and E.
+  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
+                  MachineBasicBlock::iterator E)
+      : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
+    assert(B != E && "No instructions to bundle");
+    ++B;
+    while (B != E) {
+      MachineInstr &MI = *B;
+      ++B;
+      MI.bundleWithPred();
+    }
+  }
+
+  /// Create an MIBundleBuilder representing an existing instruction or bundle
+  /// that has MI as its head.
+  explicit MIBundleBuilder(MachineInstr *MI)
+      : MBB(*MI->getParent()), Begin(MI),
+        End(getBundleEnd(MI->getIterator())) {}
+
+  /// Return a reference to the basic block containing this bundle.
+  MachineBasicBlock &getMBB() const { return MBB; }
+
+  /// Return true if no instructions have been inserted in this bundle yet.
+  /// Empty bundles aren't representable in a MachineBasicBlock.
+  bool empty() const { return Begin == End; }
+
+  /// Return an iterator to the first bundled instruction.
+  MachineBasicBlock::instr_iterator begin() const { return Begin; }
+
+  /// Return an iterator beyond the last bundled instruction.
+  MachineBasicBlock::instr_iterator end() const { return End; }
+
+  /// Insert MI into this bundle before I, which must point to an instruction
+  /// in the bundle or be end().
+  MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
+                          MachineInstr *MI) {
+    MBB.insert(I, MI);
+    if (I == Begin) {
+      if (!empty())
+        MI->bundleWithSucc();
+      Begin = MI->getIterator();
+      return *this;
+    }
+    if (I == End) {
+      MI->bundleWithPred();
+      return *this;
+    }
+    // MI was inserted in the middle of the bundle, so its neighbors' flags are
+    // already fine. Update MI's bundle flags manually.
+    MI->setFlag(MachineInstr::BundledPred);
+    MI->setFlag(MachineInstr::BundledSucc);
+    return *this;
+  }
+
+  /// Insert MI into MBB by prepending it to the instructions in the bundle.
+  /// MI will become the first instruction in the bundle.
+  MIBundleBuilder &prepend(MachineInstr *MI) {
+    return insert(begin(), MI);
+  }
+
+  /// Insert MI into MBB by appending it to the instructions in the bundle.
+  /// MI will become the last instruction in the bundle.
+  MIBundleBuilder &append(MachineInstr *MI) {
+    return insert(end(), MI);
+  }
+};
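+
+/// A minimal sketch of building a two-instruction bundle from scratch (MI0 and
+/// MI1 are assumed to have been created with MF.CreateMachineInstr):
+///
+///   MIBundleBuilder Bundle(MBB, InsertPos);
+///   Bundle.append(MI0).append(MI1);
+///
+/// MI0 becomes the bundle head and MI1 is bundled with its predecessor;
+/// finalizeBundle (see MachineInstrBundle.h) can then add the BUNDLE header.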
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h
new file mode 100644
index 0000000..b5341fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundle.h
@@ -0,0 +1,261 @@
+//===-- CodeGen/MachineInstrBundle.h - MI bundle utilities ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utility functions to manipulate machine instruction
+// bundles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle, it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
+void finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI,
+                    MachineBasicBlock::instr_iterator LastMI);
+
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
+                    MachineBasicBlock::instr_iterator FirstMI);
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool finalizeBundles(MachineFunction &MF);
+
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleStart(
+    MachineBasicBlock::instr_iterator I) {
+  while (I->isBundledWithPred())
+    --I;
+  return I;
+}
+
+/// Returns an iterator to the first instruction in the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleStart(
+    MachineBasicBlock::const_instr_iterator I) {
+  while (I->isBundledWithPred())
+    --I;
+  return I;
+}
+
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::instr_iterator getBundleEnd(
+    MachineBasicBlock::instr_iterator I) {
+  while (I->isBundledWithSucc())
+    ++I;
+  return ++I;
+}
+
+/// Returns an iterator pointing beyond the bundle containing \p I.
+inline MachineBasicBlock::const_instr_iterator getBundleEnd(
+    MachineBasicBlock::const_instr_iterator I) {
+  while (I->isBundledWithSucc())
+    ++I;
+  return ++I;
+}
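+
+/// A short sketch of walking one bundle with the helpers above (I is an
+/// instr_iterator pointing anywhere inside the bundle; visit is hypothetical):
+///
+///   for (auto MII = getBundleStart(I), MIE = getBundleEnd(I); MII != MIE;
+///        ++MII)
+///     visit(*MII);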
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//
+
+/// MachineOperandIteratorBase - Iterator that can visit all operands on a
+/// MachineInstr, or all operands on a bundle of MachineInstrs.  This class is
+/// not intended to be used directly, use one of the sub-classes instead.
+///
+/// Intended use:
+///
+///   for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
+///     if (!MIO->isReg())
+///       continue;
+///     ...
+///   }
+///
+class MachineOperandIteratorBase {
+  MachineBasicBlock::instr_iterator InstrI, InstrE;
+  MachineInstr::mop_iterator OpI, OpE;
+
+  // If the operands on InstrI are exhausted, advance InstrI to the next
+  // bundled instruction with operands.
+  void advance() {
+    while (OpI == OpE) {
+      // Don't advance off the basic block, or into a new bundle.
+      if (++InstrI == InstrE || !InstrI->isInsideBundle())
+        break;
+      OpI = InstrI->operands_begin();
+      OpE = InstrI->operands_end();
+    }
+  }
+
+protected:
+  /// MachineOperandIteratorBase - Create an iterator that visits all operands
+  /// on MI, or all operands on every instruction in the bundle containing MI.
+  ///
+  /// @param MI The instruction to examine.
+  /// @param WholeBundle When true, visit all operands on the entire bundle.
+  ///
+  explicit MachineOperandIteratorBase(MachineInstr &MI, bool WholeBundle) {
+    if (WholeBundle) {
+      InstrI = getBundleStart(MI.getIterator());
+      InstrE = MI.getParent()->instr_end();
+    } else {
+      InstrI = InstrE = MI.getIterator();
+      ++InstrE;
+    }
+    OpI = InstrI->operands_begin();
+    OpE = InstrI->operands_end();
+    if (WholeBundle)
+      advance();
+  }
+
+  MachineOperand &deref() const { return *OpI; }
+
+public:
+  /// isValid - Returns true until all the operands have been visited.
+  bool isValid() const { return OpI != OpE; }
+
+  /// Preincrement.  Move to the next operand.
+  void operator++() {
+    assert(isValid() && "Cannot advance MIOperands beyond the last operand");
+    ++OpI;
+    advance();
+  }
+
+  /// getOperandNo - Returns the number of the current operand relative to its
+  /// instruction.
+  ///
+  unsigned getOperandNo() const {
+    return OpI - InstrI->operands_begin();
+  }
+
+  /// VirtRegInfo - Information about a virtual register used by a set of operands.
+  ///
+  struct VirtRegInfo {
+    /// Reads - One of the operands read the virtual register.  This does not
+    /// include undef or internal use operands, see MO::readsReg().
+    bool Reads;
+
+    /// Writes - One of the operands writes the virtual register.
+    bool Writes;
+
+    /// Tied - Uses and defs must use the same register. This can be because of
+    /// a two-address constraint, or there may be a partial redefinition of a
+    /// sub-register.
+    bool Tied;
+  };
+
+  /// Information about how a physical register Reg is used by a set of
+  /// operands.
+  struct PhysRegInfo {
+    /// There is a regmask operand indicating Reg is clobbered.
+    /// \see MachineOperand::CreateRegMask().
+    bool Clobbered;
+
+    /// Reg or one of its aliases is defined. The definition may only cover
+    /// parts of the register.
+    bool Defined;
+    /// Reg or a super-register is defined. The definition covers the full
+    /// register.
+    bool FullyDefined;
+
+    /// Reg or one of its aliases is read. The register may only be read
+    /// partially.
+    bool Read;
+    /// Reg or a super-register is read. The full register is read.
+    bool FullyRead;
+
+    /// Either:
+    /// - Reg is FullyDefined and all defs of reg or an overlapping
+    ///   register are dead, or
+    /// - Reg is completely dead because "defined" by a clobber.
+    bool DeadDef;
+
+    /// Reg is Defined and all defs of reg or an overlapping register are
+    /// dead.
+    bool PartialDeadDef;
+
+    /// There is a use operand of reg or a super-register with kill flag set.
+    bool Killed;
+  };
+
+  /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
+  /// virtual register.  This function should not be called after operator++(),
+  /// it expects a fresh iterator.
+  ///
+  /// @param Reg The virtual register to analyze.
+  /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
+  ///            each operand referring to Reg.
+  /// @returns A filled-in VirtRegInfo struct.
+  VirtRegInfo analyzeVirtReg(unsigned Reg,
+           SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
+
+  /// analyzePhysReg - Analyze how the current instruction or bundle uses a
+  /// physical register.  This function should not be called after operator++(),
+  /// it expects a fresh iterator.
+  ///
+  /// @param Reg The physical register to analyze.
+  /// @returns A filled-in PhysRegInfo struct.
+  PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
+};
+
+/// MIOperands - Iterate over operands of a single instruction.
+///
+class MIOperands : public MachineOperandIteratorBase {
+public:
+  MIOperands(MachineInstr &MI) : MachineOperandIteratorBase(MI, false) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIOperands - Iterate over operands of a single const instruction.
+///
+class ConstMIOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIOperands(const MachineInstr &MI)
+      : MachineOperandIteratorBase(const_cast<MachineInstr &>(MI), false) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+/// MIBundleOperands - Iterate over all operands in a bundle of machine
+/// instructions.
+///
+class MIBundleOperands : public MachineOperandIteratorBase {
+public:
+  MIBundleOperands(MachineInstr &MI) : MachineOperandIteratorBase(MI, true) {}
+  MachineOperand &operator* () const { return deref(); }
+  MachineOperand *operator->() const { return &deref(); }
+};
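+
+/// A hedged sketch of bundle-wide physical register analysis (Reg and TRI are
+/// assumed to come from the enclosing pass):
+///
+///   MIBundleOperands Ops(MI);
+///   auto PRI = Ops.analyzePhysReg(Reg, TRI);
+///   if (PRI.Defined && !PRI.Read)
+///     ...; // Reg is written but never read by this bundle.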
+
+/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
+/// machine instructions.
+///
+class ConstMIBundleOperands : public MachineOperandIteratorBase {
+public:
+  ConstMIBundleOperands(const MachineInstr &MI)
+      : MachineOperandIteratorBase(const_cast<MachineInstr &>(MI), true) {}
+  const MachineOperand &operator* () const { return deref(); }
+  const MachineOperand *operator->() const { return &deref(); }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h
new file mode 100644
index 0000000..5fe4964
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineInstrBundleIterator.h
@@ -0,0 +1,289 @@
+//===- llvm/CodeGen/MachineInstrBundleIterator.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an iterator class that iterates over bundles of MachineInstrs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/simple_ilist.h"
+#include <cassert>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits;
+template <class T> struct MachineInstrBundleIteratorTraits<T, false> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::iterator;
+  using nonconst_instr_iterator = typename list_type::iterator;
+  using const_instr_iterator = typename list_type::const_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<T, true> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::reverse_iterator;
+  using nonconst_instr_iterator = typename list_type::reverse_iterator;
+  using const_instr_iterator = typename list_type::const_reverse_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, false> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::const_iterator;
+  using nonconst_instr_iterator = typename list_type::iterator;
+  using const_instr_iterator = typename list_type::const_iterator;
+};
+template <class T> struct MachineInstrBundleIteratorTraits<const T, true> {
+  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
+  using instr_iterator = typename list_type::const_reverse_iterator;
+  using nonconst_instr_iterator = typename list_type::reverse_iterator;
+  using const_instr_iterator = typename list_type::const_reverse_iterator;
+};
+
+template <bool IsReverse> struct MachineInstrBundleIteratorHelper;
+template <> struct MachineInstrBundleIteratorHelper<false> {
+  /// Get the beginning of the current bundle.
+  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+    if (!I.isEnd())
+      while (I->isBundledWithPred())
+        --I;
+    return I;
+  }
+
+  /// Get the final node of the current bundle.
+  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+    if (!I.isEnd())
+      while (I->isBundledWithSucc())
+        ++I;
+    return I;
+  }
+
+  /// Increment forward ilist iterator.
+  template <class Iterator> static void increment(Iterator &I) {
+    I = std::next(getBundleFinal(I));
+  }
+
+  /// Decrement forward ilist iterator.
+  template <class Iterator> static void decrement(Iterator &I) {
+    I = getBundleBegin(std::prev(I));
+  }
+};
+
+template <> struct MachineInstrBundleIteratorHelper<true> {
+  /// Get the beginning of the current bundle.
+  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
+    return MachineInstrBundleIteratorHelper<false>::getBundleBegin(
+               I.getReverse())
+        .getReverse();
+  }
+
+  /// Get the final node of the current bundle.
+  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
+    return MachineInstrBundleIteratorHelper<false>::getBundleFinal(
+               I.getReverse())
+        .getReverse();
+  }
+
+  /// Increment reverse ilist iterator.
+  template <class Iterator> static void increment(Iterator &I) {
+    I = getBundleBegin(std::next(I));
+  }
+
+  /// Decrement reverse ilist iterator.
+  template <class Iterator> static void decrement(Iterator &I) {
+    I = std::prev(getBundleFinal(I));
+  }
+};
+
+/// MachineBasicBlock iterator that automatically skips over MIs that are
+/// inside bundles (i.e. walk top level MIs only).
+template <typename Ty, bool IsReverse = false>
+class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
+  using Traits = MachineInstrBundleIteratorTraits<Ty, IsReverse>;
+  using instr_iterator = typename Traits::instr_iterator;
+
+  instr_iterator MII;
+
+public:
+  using value_type = typename instr_iterator::value_type;
+  using difference_type = typename instr_iterator::difference_type;
+  using pointer = typename instr_iterator::pointer;
+  using reference = typename instr_iterator::reference;
+  using const_pointer = typename instr_iterator::const_pointer;
+  using const_reference = typename instr_iterator::const_reference;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+private:
+  using nonconst_instr_iterator = typename Traits::nonconst_instr_iterator;
+  using const_instr_iterator = typename Traits::const_instr_iterator;
+  using nonconst_iterator =
+      MachineInstrBundleIterator<typename nonconst_instr_iterator::value_type,
+                                 IsReverse>;
+  using reverse_iterator = MachineInstrBundleIterator<Ty, !IsReverse>;
+
+public:
+  MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {
+    assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&
+           "It's not legal to initialize MachineInstrBundleIterator with a "
+           "bundled MI");
+  }
+
+  MachineInstrBundleIterator(reference MI) : MII(MI) {
+    assert(!MI.isBundledWithPred() && "It's not legal to initialize "
+                                      "MachineInstrBundleIterator with a "
+                                      "bundled MI");
+  }
+
+  MachineInstrBundleIterator(pointer MI) : MII(MI) {
+    // FIXME: This conversion should be explicit.
+    assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "
+                                                "MachineInstrBundleIterator "
+                                                "with a bundled MI");
+  }
+
+  // Template allows conversion from const to nonconst.
+  template <class OtherTy>
+  MachineInstrBundleIterator(
+      const MachineInstrBundleIterator<OtherTy, IsReverse> &I,
+      typename std::enable_if<std::is_convertible<OtherTy *, Ty *>::value,
+                              void *>::type = nullptr)
+      : MII(I.getInstrIterator()) {}
+
+  MachineInstrBundleIterator() : MII(nullptr) {}
+
+  /// Explicit conversion between forward/reverse iterators.
+  ///
+  /// Translate between forward and reverse iterators without changing range
+  /// boundaries.  The resulting iterator will dereference (and have a handle)
+  /// to the previous node, which is somewhat unexpected; but converting the
+  /// two endpoints in a range will give the same range in reverse.
+  ///
+  /// This matches std::reverse_iterator conversions.
+  explicit MachineInstrBundleIterator(
+      const MachineInstrBundleIterator<Ty, !IsReverse> &I)
+      : MachineInstrBundleIterator(++I.getReverse()) {}
+
+  /// Get the bundle iterator for the given instruction's bundle.
+  static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
+    return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
+  }
+
+  reference operator*() const { return *MII; }
+  pointer operator->() const { return &operator*(); }
+
+  /// Check for null.
+  bool isValid() const { return MII.getNodePtr(); }
+
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L.MII == R.MII;
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const const_instr_iterator &R) {
+    return L.MII == R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const const_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L == R.MII; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const nonconst_instr_iterator &R) {
+    return L.MII == R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const nonconst_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return L == R.MII; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) {
+    return L == const_instr_iterator(R); // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) {
+    return const_instr_iterator(L) == R; // Avoid assertion about validity of L.
+  }
+  friend bool operator==(const MachineInstrBundleIterator &L,
+                         const_reference R) {
+    return L == &R; // Avoid assertion about validity of R.
+  }
+  friend bool operator==(const_reference L,
+                         const MachineInstrBundleIterator &R) {
+    return &L == R; // Avoid assertion about validity of L.
+  }
+
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const const_instr_iterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const const_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const nonconst_instr_iterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const nonconst_instr_iterator &L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const MachineInstrBundleIterator &L,
+                         const_reference R) {
+    return !(L == R);
+  }
+  friend bool operator!=(const_reference L,
+                         const MachineInstrBundleIterator &R) {
+    return !(L == R);
+  }
+
+  // Increment and decrement operators...
+  MachineInstrBundleIterator &operator--() {
+    this->decrement(MII);
+    return *this;
+  }
+  MachineInstrBundleIterator &operator++() {
+    this->increment(MII);
+    return *this;
+  }
+  MachineInstrBundleIterator operator--(int) {
+    MachineInstrBundleIterator Temp = *this;
+    --*this;
+    return Temp;
+  }
+  MachineInstrBundleIterator operator++(int) {
+    MachineInstrBundleIterator Temp = *this;
+    ++*this;
+    return Temp;
+  }
+
+  instr_iterator getInstrIterator() const { return MII; }
+
+  nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }
+
+  /// Get a reverse iterator to the same node.
+  ///
+  /// Gives a reverse iterator that will dereference (and have a handle) to the
+  /// same node.  Converting the endpoint iterators in a range will give a
+  /// different range; for range operations, use the explicit conversions.
+  reverse_iterator getReverse() const { return MII.getReverse(); }
+};
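+
+/// A brief sketch of the range-preserving conversion described above (MBB is
+/// an assumed MachineBasicBlock):
+///
+///   auto RBegin = MachineBasicBlock::reverse_iterator(MBB.end());
+///   auto REnd = MachineBasicBlock::reverse_iterator(MBB.begin());
+///   // [RBegin, REnd) visits the same top-level instructions as
+///   // [MBB.begin(), MBB.end()), in reverse order.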
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
new file mode 100644
index 0000000..25a3e6b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -0,0 +1,140 @@
+//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables  --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MachineJumpTableInfo class keeps track of jump tables referenced by
+// lowered switch instructions in the MachineFunction.
+//
+// Instructions reference the address of these jump tables through the use of
+// MO_JumpTableIndex values.  When emitting assembly or machine code, these
+// virtual address references are converted to refer to the address of the
+// function jump tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class DataLayout;
+class raw_ostream;
+
+/// MachineJumpTableEntry - One jump table in the jump table info.
+///
+struct MachineJumpTableEntry {
+  /// MBBs - The vector of basic blocks from which to create the jump table.
+  std::vector<MachineBasicBlock*> MBBs;
+
+  explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
+  : MBBs(M) {}
+};
+
+class MachineJumpTableInfo {
+public:
+  /// JTEntryKind - This enum indicates how each entry of the jump table is
+  /// represented and emitted.
+  enum JTEntryKind {
+    /// EK_BlockAddress - Each entry is the plain address of a block, e.g.:
+    ///     .word LBB123
+    EK_BlockAddress,
+
+    /// EK_GPRel64BlockAddress - Each entry is the address of a block, encoded
+    /// with a relocation as gp-relative, e.g.:
+    ///     .gpdword LBB123
+    EK_GPRel64BlockAddress,
+
+    /// EK_GPRel32BlockAddress - Each entry is the address of a block, encoded
+    /// with a relocation as gp-relative, e.g.:
+    ///     .gprel32 LBB123
+    EK_GPRel32BlockAddress,
+
+    /// EK_LabelDifference32 - Each entry is the address of the block minus
+    /// the address of the jump table.  This is used for PIC jump tables where
+    /// gprel32 is not supported.  e.g.:
+    ///      .word LBB123 - LJTI1_2
+    /// If the .set directive is supported, this is emitted as:
+    ///      .set L4_5_set_123, LBB123 - LJTI1_2
+    ///      .word L4_5_set_123
+    EK_LabelDifference32,
+
+    /// EK_Inline - Jump table entries are emitted inline at their point of
+    /// use. It is the responsibility of the target to emit the entries.
+    EK_Inline,
+
+    /// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
+    /// TargetLowering::LowerCustomJumpTableEntry hook.
+    EK_Custom32
+  };
+private:
+  JTEntryKind EntryKind;
+  std::vector<MachineJumpTableEntry> JumpTables;
+public:
+  explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+
+  JTEntryKind getEntryKind() const { return EntryKind; }
+
+  /// getEntrySize - Return the size of each entry in the jump table.
+  unsigned getEntrySize(const DataLayout &TD) const;
+  /// getEntryAlignment - Return the alignment of each entry in the jump table.
+  unsigned getEntryAlignment(const DataLayout &TD) const;
+
+  /// createJumpTableIndex - Create a new jump table.
+  ///
+  unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
+
+  /// isEmpty - Return true if there are no jump tables.
+  ///
+  bool isEmpty() const { return JumpTables.empty(); }
+
+  const std::vector<MachineJumpTableEntry> &getJumpTables() const {
+    return JumpTables;
+  }
+
+  /// RemoveJumpTable - Mark the specific index as being dead.  This will
+  /// prevent it from being emitted.
+  void RemoveJumpTable(unsigned Idx) {
+    JumpTables[Idx].MBBs.clear();
+  }
+
+  /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
+  /// the jump tables to branch to New instead.
+  bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
+  /// the jump table to branch to New instead.
+  bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
+                             MachineBasicBlock *New);
+
+  /// print - Used by the MachineFunction printer to print information about
+  /// jump tables.  Implemented in MachineFunction.cpp
+  ///
+  void print(raw_ostream &OS) const;
+
+  /// dump - Call to stderr.
+  ///
+  void dump() const;
+};
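+
+/// A minimal sketch of creating a jump table (DestBBs is an assumed
+/// std::vector<MachineBasicBlock*> built by the switch lowering code):
+///
+///   MachineJumpTableInfo *JTI =
+///       MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_BlockAddress);
+///   unsigned Idx = JTI->createJumpTableIndex(DestBBs);
+///   OS << printJumpTableEntryReference(Idx) << '\n'; // %jump-table.<Idx>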
+
+
+/// Prints a jump table entry reference.
+///
+/// The format is:
+///   %jump-table.5       - a jump table entry with index == 5.
+///
+/// Usage: OS << printJumpTableEntryReference(Idx) << '\n';
+Printable printJumpTableEntryReference(unsigned Idx);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
new file mode 100644
index 0000000..104655e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineLoopInfo.h
@@ -0,0 +1,193 @@
+//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineLoopInfo class that is used to identify natural
+// loops and determine the loop depth of various nodes of the CFG.  Note that
+// natural loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function.  For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+//  * whether there is a preheader for the loop
+//  * the number of back edges to the header
+//  * whether or not a particular block branches out of the loop
+//  * the successor blocks of the loop
+//  * the loop depth
+//  * the trip count
+//  * etc...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
+#define LLVM_CODEGEN_MACHINELOOPINFO_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+// Implementation in LoopInfoImpl.h
+class MachineLoop;
+extern template class LoopBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
+public:
+  /// Return the "top" block in the loop, which is the first block in the linear
+  /// layout, ignoring any parts of the loop not contiguous with the part that
+  /// contains the header.
+  MachineBasicBlock *getTopBlock();
+
+  /// Return the "bottom" block in the loop, which is the last block in the
+  /// linear layout, ignoring any parts of the loop not contiguous with the part
+  /// that contains the header.
+  MachineBasicBlock *getBottomBlock();
+
+  /// \brief Find the block that contains the loop control variable and the
+  /// loop test. This will return the latch block if it's one of the exiting
+  /// blocks. Otherwise, return the exiting block. Return 'null' when
+  /// multiple exiting blocks are present.
+  MachineBasicBlock *findLoopControlBlock();
+
+  /// Return the debug location of the start of this loop.
+  /// This looks for a BB terminating instruction with a known debug
+  /// location by looking at the preheader and header blocks. If it
+  /// cannot find a terminating instruction with location information,
+  /// it returns an unknown location.
+  DebugLoc getStartLoc() const;
+
+  void dump() const;
+
+private:
+  friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+
+  explicit MachineLoop(MachineBasicBlock *MBB)
+    : LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
+
+  MachineLoop() = default;
+};
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoopInfo : public MachineFunctionPass {
+  friend class LoopBase<MachineBasicBlock, MachineLoop>;
+
+  LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  MachineLoopInfo() : MachineFunctionPass(ID) {
+    initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+  }
+  MachineLoopInfo(const MachineLoopInfo &) = delete;
+  MachineLoopInfo &operator=(const MachineLoopInfo &) = delete;
+
+  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
+  /// \brief Find the block that either is the loop preheader, or could
+  /// speculatively be used as the preheader. This is e.g. useful to place
+  /// loop setup code. Code that cannot be speculated should not be placed
+  /// here. SpeculativePreheader controls whether this function also tries to
+  /// find the speculative preheader when the regular preheader is not present.
+  MachineBasicBlock *findLoopPreheader(MachineLoop *L,
+                                       bool SpeculativePreheader = false) const;
+
+  /// The iterator interface to the top-level loops in the current function.
+  using iterator = LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator;
+  inline iterator begin() const { return LI.begin(); }
+  inline iterator end() const { return LI.end(); }
+  bool empty() const { return LI.empty(); }
+
+  /// Return the innermost loop that BB lives in. If a basic block is in no loop
+  /// (for example the entry node), null is returned.
+  inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
+    return LI.getLoopFor(BB);
+  }
+
+  /// Same as getLoopFor.
+  inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
+    return LI.getLoopFor(BB);
+  }
+
+  /// Return the loop nesting level of the specified block.
+  inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
+    return LI.getLoopDepth(BB);
+  }
+
+  /// True if the block is a loop header node.
+  inline bool isLoopHeader(const MachineBasicBlock *BB) const {
+    return LI.isLoopHeader(BB);
+  }
+
+  /// Calculate the natural loop information.
+  bool runOnMachineFunction(MachineFunction &F) override;
+
+  void releaseMemory() override { LI.releaseMemory(); }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  /// This removes the specified top-level loop from this loop info object. The
+  /// loop is not deleted, as it will presumably be inserted into another loop.
+  inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }
+
+  /// Change the top-level loop that contains BB to the specified loop. This
+  /// should be used by transformations that restructure the loop hierarchy
+  /// tree.
+  inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
+    LI.changeLoopFor(BB, L);
+  }
+
+  /// Replace the specified loop in the top-level loops list with the indicated
+  /// loop.
+  inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
+    LI.changeTopLevelLoop(OldLoop, NewLoop);
+  }
+
+  /// This adds the specified loop to the collection of top-level loops.
+  inline void addTopLevelLoop(MachineLoop *New) {
+    LI.addTopLevelLoop(New);
+  }
+
+  /// This method completely removes BB from all data structures, including all
+  /// of the Loop objects it is nested in and our mapping from
+  /// MachineBasicBlocks to loops.
+  void removeBlock(MachineBasicBlock *BB) {
+    LI.removeBlock(BB);
+  }
+};
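+
+/// A short usage sketch inside a MachineFunctionPass (illustrative only):
+///
+///   MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+///   for (MachineBasicBlock &MBB : MF)
+///     if (MachineLoop *L = MLI.getLoopFor(&MBB))
+///       dbgs() << "depth " << L->getLoopDepth() << '\n';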
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const MachineLoop*> {
+  using NodeRef = const MachineLoop *;
+  using ChildIteratorType = MachineLoopInfo::iterator;
+
+  static NodeRef getEntryNode(const MachineLoop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+template <> struct GraphTraits<MachineLoop*> {
+  using NodeRef = MachineLoop *;
+  using ChildIteratorType = MachineLoopInfo::iterator;
+
+  static NodeRef getEntryNode(MachineLoop *L) { return L; }
+  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
+  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINELOOPINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
new file mode 100644
index 0000000..dea0d80
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineMemOperand.h
@@ -0,0 +1,329 @@
+//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineMemOperand class, which is a
+// description of a memory reference. It is used to help track dependencies
+// in the backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
+#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
+
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class FoldingSetNodeID;
+class MDNode;
+class raw_ostream;
+class MachineFunction;
+class ModuleSlotTracker;
+
+/// This class contains a discriminated union of information about pointers in
+/// memory operands, relating them back to LLVM IR or to virtual locations (such
+/// as frame indices) that are exposed during codegen.
+struct MachinePointerInfo {
+  /// This is the IR pointer value for the access, or it is null if unknown.
+  /// If this is null, then the access is to a pointer in the default address
+  /// space.
+  PointerUnion<const Value *, const PseudoSourceValue *> V;
+
+  /// Offset - This is an offset from the base Value*.
+  int64_t Offset;
+
+  uint8_t StackID;
+
+  unsigned AddrSpace = 0;
+
+  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
+                              uint8_t ID = 0)
+      : V(v), Offset(offset), StackID(ID) {
+    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
+  }
+
+  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
+                              uint8_t ID = 0)
+      : V(v), Offset(offset), StackID(ID) {
+    AddrSpace = v ? v->getAddressSpace() : 0;
+  }
+
+  explicit MachinePointerInfo(unsigned AddressSpace = 0)
+      : V((const Value *)nullptr), Offset(0), StackID(0),
+        AddrSpace(AddressSpace) {}
+
+  explicit MachinePointerInfo(
+    PointerUnion<const Value *, const PseudoSourceValue *> v,
+    int64_t offset = 0,
+    uint8_t ID = 0)
+    : V(v), Offset(offset), StackID(ID) {
+    if (V) {
+      if (const auto *ValPtr = V.dyn_cast<const Value*>())
+        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
+      else
+        AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
+    }
+  }
+
+  MachinePointerInfo getWithOffset(int64_t O) const {
+    if (V.isNull())
+      return MachinePointerInfo(AddrSpace);
+    if (V.is<const Value*>())
+      return MachinePointerInfo(V.get<const Value*>(), Offset+O, StackID);
+    return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O,
+                              StackID);
+  }
+
+  /// Return true if memory region [V, V+Offset+Size) is known to be
+  /// dereferenceable.
+  bool isDereferenceable(unsigned Size, LLVMContext &C,
+                         const DataLayout &DL) const;
+
+  /// Return the LLVM IR address space number that this pointer points into.
+  unsigned getAddrSpace() const;
+
+  /// Return a MachinePointerInfo record that refers to the constant pool.
+  static MachinePointerInfo getConstantPool(MachineFunction &MF);
+
+  /// Return a MachinePointerInfo record that refers to the specified
+  /// FrameIndex.
+  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
+                                          int64_t Offset = 0);
+
+  /// Return a MachinePointerInfo record that refers to a jump table entry.
+  static MachinePointerInfo getJumpTable(MachineFunction &MF);
+
+  /// Return a MachinePointerInfo record that refers to a GOT entry.
+  static MachinePointerInfo getGOT(MachineFunction &MF);
+
+  /// Stack pointer relative access.
+  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
+                                     uint8_t ID = 0);
+
+  /// Stack memory without other information.
+  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
+};
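+
+/// A hedged sketch of creating a memory operand for a frame-index load (MF,
+/// FI, Size, and Align are assumed to come from the enclosing lowering code):
+///
+///   MachineMemOperand *MMO = MF.getMachineMemOperand(
+///       MachinePointerInfo::getFixedStack(MF, FI),
+///       MachineMemOperand::MOLoad, Size, Align);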
+
+
+//===----------------------------------------------------------------------===//
+/// A description of a memory reference used in the backend.
+/// Instead of holding a StoreInst or LoadInst, this class holds the address
+/// Value of the reference along with a byte size and offset. This allows it
+/// to describe lowered loads and stores. Also, the special PseudoSourceValue
+/// objects can be used to represent loads and stores to memory locations
+/// that aren't explicit in the regular LLVM IR.
+///
+class MachineMemOperand {
+public:
+  /// Flags values. These may be or'd together.
+  enum Flags : uint16_t {
+    // No flags set.
+    MONone = 0,
+    /// The memory access reads data.
+    MOLoad = 1u << 0,
+    /// The memory access writes data.
+    MOStore = 1u << 1,
+    /// The memory access is volatile.
+    MOVolatile = 1u << 2,
+    /// The memory access is non-temporal.
+    MONonTemporal = 1u << 3,
+    /// The memory access is dereferenceable (i.e., doesn't trap).
+    MODereferenceable = 1u << 4,
+    /// The memory access always returns the same value (or traps).
+    MOInvariant = 1u << 5,
+
+    // Reserved for use by target-specific passes.
+    // Targets may override getSerializableMachineMemOperandTargetFlags() to
+    // enable MIR serialization/parsing of these flags.  If more of these flags
+    // are added, the MIR printing/parsing code will need to be updated as well.
+    MOTargetFlag1 = 1u << 6,
+    MOTargetFlag2 = 1u << 7,
+    MOTargetFlag3 = 1u << 8,
+
+    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
+  };
+
+private:
+  /// Atomic information for this memory operation.
+  struct MachineAtomicInfo {
+    /// Synchronization scope ID for this memory operation.
+    unsigned SSID : 8;            // SyncScope::ID
+    /// Atomic ordering requirements for this memory operation. For cmpxchg
+    /// atomic operations, atomic ordering requirements when store occurs.
+    unsigned Ordering : 4;        // enum AtomicOrdering
+    /// For cmpxchg atomic operations, atomic ordering requirements when store
+    /// does not occur.
+    unsigned FailureOrdering : 4; // enum AtomicOrdering
+  };
+
+  MachinePointerInfo PtrInfo;
+  uint64_t Size;
+  Flags FlagVals;
+  uint16_t BaseAlignLog2; // log_2(base_alignment) + 1
+  MachineAtomicInfo AtomicInfo;
+  AAMDNodes AAInfo;
+  const MDNode *Ranges;
+
+public:
+  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
+  /// size, and base alignment. For atomic operations the synchronization scope
+  /// and atomic ordering requirements must also be specified. For cmpxchg
+  /// atomic operations the atomic ordering requirements when store does not
+  /// occur must also be specified.
+  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
+                    unsigned base_alignment,
+                    const AAMDNodes &AAInfo = AAMDNodes(),
+                    const MDNode *Ranges = nullptr,
+                    SyncScope::ID SSID = SyncScope::System,
+                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
+
+  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
+
+  /// Return the base address of the memory access. This may either be a normal
+  /// LLVM IR Value, or one of the special values used in CodeGen.
+  /// Special values are those obtained via
+  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
+  /// other PseudoSourceValue member functions which return objects which stand
+  /// for frame/stack pointer relative references and other special references
+  /// which are not representable in the high-level IR.
+  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
+
+  const PseudoSourceValue *getPseudoValue() const {
+    return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
+  }
+
+  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
+
+  /// Return the raw flags of the source value, \see Flags.
+  Flags getFlags() const { return FlagVals; }
+
+  /// Bitwise OR the current flags with the given flags.
+  void setFlags(Flags f) { FlagVals |= f; }
+
+  /// For normal values, this is a byte offset added to the base address.
+  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
+  int64_t getOffset() const { return PtrInfo.Offset; }
+
+  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
+
+  /// Return the size in bytes of the memory reference.
+  uint64_t getSize() const { return Size; }
+
+  /// Return the minimum known alignment in bytes of the actual memory
+  /// reference.
+  uint64_t getAlignment() const;
+
+  /// Return the minimum known alignment in bytes of the base address, without
+  /// the offset.
+  uint64_t getBaseAlignment() const { return (1u << BaseAlignLog2) >> 1; }
+
+  /// Return the AA tags for the memory reference.
+  AAMDNodes getAAInfo() const { return AAInfo; }
+
+  /// Return the range tag for the memory reference.
+  const MDNode *getRanges() const { return Ranges; }
+
+  /// Returns the synchronization scope ID for this memory operation.
+  SyncScope::ID getSyncScopeID() const {
+    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
+  }
+
+  /// Return the atomic ordering requirements for this memory operation. For
+  /// cmpxchg atomic operations, return the atomic ordering requirements when
+  /// store occurs.
+  AtomicOrdering getOrdering() const {
+    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
+  }
+
+  /// For cmpxchg atomic operations, return the atomic ordering requirements
+  /// when store does not occur.
+  AtomicOrdering getFailureOrdering() const {
+    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
+  }
+
+  bool isLoad() const { return FlagVals & MOLoad; }
+  bool isStore() const { return FlagVals & MOStore; }
+  bool isVolatile() const { return FlagVals & MOVolatile; }
+  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
+  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
+  bool isInvariant() const { return FlagVals & MOInvariant; }
+
+  /// Returns true if this operation has an atomic ordering requirement of
+  /// unordered or higher, false otherwise.
+  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }
+
+  /// Returns true if this memory operation doesn't have any ordering
+  /// constraints other than normal aliasing. Volatile and atomic memory
+  /// operations can't be reordered.
+  ///
+  /// Currently, we don't model the difference between volatile and atomic
+  /// operations. They should retain their ordering relative to all memory
+  /// operations.
+  bool isUnordered() const { return !isVolatile(); }
+
+  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
+  /// greater alignment. This must only be used when the new alignment applies
+  /// to all users of this MachineMemOperand.
+  void refineAlignment(const MachineMemOperand *MMO);
+
+  /// Change the SourceValue for this MachineMemOperand. This should only be
+  /// used when an object is being relocated and all references to it are being
+  /// updated.
+  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
+  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
+  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
+
+  /// Profile - Gather unique data for the object.
+  ///
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// Support for operator<<.
+  /// @{
+  void print(raw_ostream &OS) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
+  void print(raw_ostream &OS, ModuleSlotTracker &MST,
+             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
+             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
+  /// @}
+
+  friend bool operator==(const MachineMemOperand &LHS,
+                         const MachineMemOperand &RHS) {
+    return LHS.getValue() == RHS.getValue() &&
+           LHS.getPseudoValue() == RHS.getPseudoValue() &&
+           LHS.getSize() == RHS.getSize() &&
+           LHS.getOffset() == RHS.getOffset() &&
+           LHS.getFlags() == RHS.getFlags() &&
+           LHS.getAAInfo() == RHS.getAAInfo() &&
+           LHS.getRanges() == RHS.getRanges() &&
+           LHS.getAlignment() == RHS.getAlignment() &&
+           LHS.getAddrSpace() == RHS.getAddrSpace();
+  }
+
+  friend bool operator!=(const MachineMemOperand &LHS,
+                         const MachineMemOperand &RHS) {
+    return !(LHS == RHS);
+  }
+};
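+
+/// A small sketch of the common reordering check (MMO is illustrative):
+///
+///   if (MMO->isLoad() && MMO->isUnordered())
+///     ...; // This load may be reordered with other memory operations,
+///          // subject to the usual alias-analysis checks.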
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
+  MRO.print(OS);
+  return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
new file mode 100644
index 0000000..6be304f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfo.h
@@ -0,0 +1,271 @@
+//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect meta information for a module.  This information should be in a
+// neutral form that can be used by different debugging and exception handling
+// schemes.
+//
+// The organization of information is primarily clustered around the source
+// compile units.  The main exception is source line correspondence where
+// inlining may interleave code from various compile units.
+//
+// The following information can be retrieved from the MachineModuleInfo.
+//
+//  -- Source directories - Directories are uniqued based on their canonical
+//     string and assigned a sequential numeric ID (base 1.)
+//  -- Source files - Files are also uniqued based on their name and directory
+//     ID.  A file ID is sequential number (base 1.)
+//  -- Source line correspondence - A vector of file ID, line#, column# triples.
+//     A DEBUG_LOCATION instruction is generated by the DAG Legalizer
+//     corresponding to each entry in the source line list.  This allows a debug
+//     emitter to generate labels referenced by debug information tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
+#define LLVM_CODEGEN_MACHINEMODULEINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Pass.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class CallInst;
+class Function;
+class MachineFunction;
+class MMIAddrLabelMap;
+class Module;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+/// This class can be derived from and used by targets to hold private
+/// target-specific information for each Module.  Objects of this type are
+/// accessed/created with MMI::getInfo and destroyed when the MachineModuleInfo
+/// is destroyed.
+///
+class MachineModuleInfoImpl {
+public:
+  using StubValueTy = PointerIntPair<MCSymbol *, 1, bool>;
+  using SymbolListTy = std::vector<std::pair<MCSymbol *, StubValueTy>>;
+
+  virtual ~MachineModuleInfoImpl();
+
+protected:
+  /// Return the entries from a DenseMap in a deterministic sorted order.
+  /// Clears the map.
+  static SymbolListTy getSortedStubs(DenseMap<MCSymbol*, StubValueTy>&);
+};
+
+//===----------------------------------------------------------------------===//
+/// This class contains meta information specific to a module.  Queries can be
+/// made by different debugging and exception handling schemes and reformatted
+/// for specific use.
+///
+class MachineModuleInfo : public ImmutablePass {
+  const TargetMachine &TM;
+
+  /// This is the MCContext used for the entire code generator.
+  MCContext Context;
+
+  /// This is the LLVM Module being worked on.
+  const Module *TheModule;
+
+  /// This is the object-file-format-specific implementation of
+  /// MachineModuleInfoImpl, which lets targets accumulate whatever info they
+  /// want.
+  MachineModuleInfoImpl *ObjFileMMI;
+
+  /// \name Exception Handling
+  /// \{
+
+  /// Vector of all personality functions ever seen. Used to emit common EH
+  /// frames.
+  std::vector<const Function *> Personalities;
+
+  /// The current call site index being processed, if any. 0 if none.
+  unsigned CurCallSite;
+
+  /// \}
+
+  /// This map keeps track of which symbol is being used for the specified
+  /// basic block's address of label.
+  MMIAddrLabelMap *AddrLabelSymbols;
+
+  // TODO: Ideally, what we'd like is to have a switch that allows emitting 
+  // synchronous (precise at call-sites only) CFA into .eh_frame. However,
+  // even under this switch, we'd like .debug_frame to be precise when using
+  // -g. At this moment, there's no way to specify that some CFI directives
+  // go into .eh_frame only, while others go into .debug_frame only.
+
+  /// True if debugging information is available in this module.
+  bool DbgInfoAvailable;
+
+  /// True if this module calls VarArg function with floating-point arguments.
+  /// This is used to emit an undefined reference to _fltused on Windows
+  /// targets.
+  bool UsesVAFloatArgument;
+
+  /// True if the module calls the __morestack function indirectly, as is
+  /// required under the large code model on x86. This is used to emit
+  /// a definition of a symbol, __morestack_addr, containing the address. See
+  /// comments in lib/Target/X86/X86FrameLowering.cpp for more details.
+  bool UsesMorestackAddr;
+
+  /// True if the module contains split-stack functions. This is used to
+  /// emit the .note.GNU-split-stack section, as required by the linker for
+  /// the special handling of split-stack functions calling no-split-stack
+  /// functions.
+  bool HasSplitStack;
+
+  /// True if the module contains no-split-stack functions. This is used to
+  /// emit the .note.GNU-no-split-stack section when the module also contains
+  /// split-stack functions.
+  bool HasNosplitStack;
+
+  /// Maps IR Functions to their corresponding MachineFunctions.
+  DenseMap<const Function*, std::unique_ptr<MachineFunction>> MachineFunctions;
+  /// Next unique number available for a MachineFunction.
+  unsigned NextFnNum = 0;
+  const Function *LastRequest = nullptr; ///< Used for shortcut/cache.
+  MachineFunction *LastResult = nullptr; ///< Used for shortcut/cache.
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  explicit MachineModuleInfo(const TargetMachine *TM = nullptr);
+  ~MachineModuleInfo() override;
+
+  // Initialization and Finalization
+  bool doInitialization(Module &) override;
+  bool doFinalization(Module &) override;
+
+  const MCContext &getContext() const { return Context; }
+  MCContext &getContext() { return Context; }
+
+  const Module *getModule() const { return TheModule; }
+
+  /// Returns the MachineFunction constructed for the IR function \p F.
+  /// Creates a new MachineFunction if none exists yet.
+  MachineFunction &getOrCreateMachineFunction(const Function &F);
+
+  /// \brief Returns the MachineFunction associated with the IR function \p F
+  /// if there is one, otherwise nullptr.
+  MachineFunction *getMachineFunction(const Function &F) const;
+
+  /// Delete the MachineFunction associated with \p F and remove its entry
+  /// from the IR-Function-to-MachineFunction map.
+  void deleteMachineFunctionFor(Function &F);
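+
+  // A minimal usage sketch (illustrative only; "MMI" and "M" stand for a
+  // MachineModuleInfo and a Module already in scope):
+  //
+  //   for (const Function &F : M)
+  //     if (MachineFunction *MF = MMI.getMachineFunction(F))
+  //       MF->print(errs()); // inspect whatever machine code exists so far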
+
+  /// Keep track of various per-function pieces of information for backends
+  /// that would like to do so.
+  template<typename Ty>
+  Ty &getObjFileInfo() {
+    if (ObjFileMMI == nullptr)
+      ObjFileMMI = new Ty(*this);
+    return *static_cast<Ty*>(ObjFileMMI);
+  }
+
+  template<typename Ty>
+  const Ty &getObjFileInfo() const {
+    return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
+  }
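+
+  // A minimal sketch of the intended use (illustrative; assumes an ELF target
+  // and the MachineModuleInfoELF impl declared in MachineModuleInfoImpls.h):
+  //
+  //   auto &MMIELF = MMI.getObjFileInfo<MachineModuleInfoELF>();
+  //
+  // The first call constructs the impl; later calls return the same object.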
+
+  /// Returns true if valid debug info is present.
+  bool hasDebugInfo() const { return DbgInfoAvailable; }
+  void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
+
+  bool usesVAFloatArgument() const {
+    return UsesVAFloatArgument;
+  }
+
+  void setUsesVAFloatArgument(bool b) {
+    UsesVAFloatArgument = b;
+  }
+
+  bool usesMorestackAddr() const {
+    return UsesMorestackAddr;
+  }
+
+  void setUsesMorestackAddr(bool b) {
+    UsesMorestackAddr = b;
+  }
+
+  bool hasSplitStack() const {
+    return HasSplitStack;
+  }
+
+  void setHasSplitStack(bool b) {
+    HasSplitStack = b;
+  }
+
+  bool hasNosplitStack() const {
+    return HasNosplitStack;
+  }
+
+  void setHasNosplitStack(bool b) {
+    HasNosplitStack = b;
+  }
+
+  /// Return the symbol to be used for the specified basic block when its
+  /// address is taken.  This cannot be its normal LBB label because the block
+  /// may be accessed outside its containing function.
+  MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
+    return getAddrLabelSymbolToEmit(BB).front();
+  }
+
+  /// Return the symbol to be used for the specified basic block when its
+  /// address is taken.  If other blocks were RAUW'd to this one, we may have
+  /// to emit them as well, return the whole set.
+  ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);
+
+  /// If the specified function has had any references to address-taken blocks
+  /// generated, but the block got deleted, return the symbol now so we can
+  /// emit it.  This prevents emitting a reference to a symbol that has no
+  /// definition.
+  void takeDeletedSymbolsForFunction(const Function *F,
+                                     std::vector<MCSymbol*> &Result);
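+
+  // A minimal sketch (illustrative; "BB" is an address-taken IR block and
+  // "OutStreamer" an MCStreamer, as in AsmPrinter):
+  //
+  //   for (MCSymbol *S : MMI.getAddrLabelSymbolToEmit(BB))
+  //     OutStreamer.EmitLabel(S);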
+
+  /// \name Exception Handling
+  /// \{
+
+  /// Set the call site currently being processed.
+  void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
+
+  /// Get the call site currently being processed, if any.  Returns zero if
+  /// none.
+  unsigned getCurrentCallSite() { return CurCallSite; }
+
+  /// Provide the personality function for the exception information.
+  void addPersonality(const Function *Personality);
+
+  /// Return the array of all personality functions ever seen.
+  const std::vector<const Function *>& getPersonalities() const {
+    return Personalities;
+  }
+  /// \}
+}; // End class MachineModuleInfo
+
+//===- MMI building helpers -----------------------------------------------===//
+
+/// Determine if any floating-point values are being passed to this variadic
+/// function, and set the MachineModuleInfo's usesVAFloatArgument flag if so.
+/// This flag is used to emit an undefined reference to _fltused on Windows,
+/// which will link in MSVCRT's floating-point support.
+void computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEMODULEINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
new file mode 100644
index 0000000..6a87fa2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -0,0 +1,85 @@
+//===- llvm/CodeGen/MachineModuleInfoImpls.h --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines object-file format specific implementations of
+// MachineModuleInfoImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+class MCSymbol;
+
+/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
+/// for MachO targets.
+class MachineModuleInfoMachO : public MachineModuleInfoImpl {
+  /// GVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something like
+  /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
+  /// is true if this GV is external.
+  DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+  /// ThreadLocalGVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something
+  /// like "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra
+  /// bit is true if this GV is external.
+  DenseMap<MCSymbol *, StubValueTy> ThreadLocalGVStubs;
+
+  virtual void anchor(); // Out of line virtual method.
+
+public:
+  MachineModuleInfoMachO(const MachineModuleInfo &) {}
+
+  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return GVStubs[Sym];
+  }
+
+  StubValueTy &getThreadLocalGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return ThreadLocalGVStubs[Sym];
+  }
+
+  /// Accessor methods to return the set of stubs in sorted order.
+  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+  SymbolListTy GetThreadLocalGVStubList() {
+    return getSortedStubs(ThreadLocalGVStubs);
+  }
+};
+
+/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
+/// for ELF targets.
+class MachineModuleInfoELF : public MachineModuleInfoImpl {
+  /// GVStubs - These stubs are used to materialize global addresses in PIC
+  /// mode.
+  DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+  virtual void anchor(); // Out of line virtual method.
+
+public:
+  MachineModuleInfoELF(const MachineModuleInfo &) {}
+
+  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+    assert(Sym && "Key cannot be null");
+    return GVStubs[Sym];
+  }
+
+  /// Accessor methods to return the set of stubs in sorted order.
+  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+};
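+
+// A minimal sketch of the stub-table protocol shared by both impls
+// (illustrative; "MMIELF", "Sym" and "Target" are assumed to be in scope,
+// with Target the symbol the stub resolves to):
+//
+//   MachineModuleInfoImpl::StubValueTy &E = MMIELF.getGVStubEntry(Sym);
+//   if (!E.getPointer())
+//     E = MachineModuleInfoImpl::StubValueTy(Target, /*External=*/true);
+//   // At emission time, drain the sorted list; this clears the map:
+//   MachineModuleInfoImpl::SymbolListTy Stubs = MMIELF.GetGVStubList();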
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
new file mode 100644
index 0000000..4f0db1c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOperand.h
@@ -0,0 +1,941 @@
+//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineOperand class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
+#define LLVM_CODEGEN_MACHINEOPERAND_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockAddress;
+class ConstantFP;
+class ConstantInt;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineInstr;
+class MachineRegisterInfo;
+class MCCFIInstruction;
+class MDNode;
+class ModuleSlotTracker;
+class TargetMachine;
+class TargetIntrinsicInfo;
+class TargetRegisterInfo;
+class hash_code;
+class raw_ostream;
+class MCSymbol;
+
+/// MachineOperand class - Representation of each machine instruction operand.
+///
+/// This class isn't a POD type because it has a private constructor, but its
+/// destructor must be trivial. Functions like MachineInstr::addOperand(),
+/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
+/// not having to call the MachineOperand destructor.
+///
+class MachineOperand {
+public:
+  enum MachineOperandType : unsigned char {
+    MO_Register,          ///< Register operand.
+    MO_Immediate,         ///< Immediate operand
+    MO_CImmediate,        ///< Immediate >64bit operand
+    MO_FPImmediate,       ///< Floating-point immediate operand
+    MO_MachineBasicBlock, ///< MachineBasicBlock reference
+    MO_FrameIndex,        ///< Abstract Stack Frame Index
+    MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
+    MO_TargetIndex,       ///< Target-dependent index+offset operand.
+    MO_JumpTableIndex,    ///< Address of indexed Jump Table for switch
+    MO_ExternalSymbol,    ///< Name of external global symbol
+    MO_GlobalAddress,     ///< Address of a global value
+    MO_BlockAddress,      ///< Address of a basic block
+    MO_RegisterMask,      ///< Mask of preserved registers.
+    MO_RegisterLiveOut,   ///< Mask of live-out registers.
+    MO_Metadata,          ///< Metadata reference (for debug info)
+    MO_MCSymbol,          ///< MCSymbol reference (for debug/eh info)
+    MO_CFIIndex,          ///< MCCFIInstruction index.
+    MO_IntrinsicID,       ///< Intrinsic ID for ISel
+    MO_Predicate,         ///< Generic predicate for ISel
+    MO_Last = MO_Predicate,
+  };
+
+private:
+  /// OpKind - Specify what kind of operand this is.  This discriminates the
+  /// union.
+  unsigned OpKind : 8;
+
+  /// Subregister number for MO_Register.  A value of 0 indicates the
+  /// MO_Register has no subReg.
+  ///
+  /// For all other kinds of operands, this field holds target-specific flags.
+  unsigned SubReg_TargetFlags : 12;
+
+  /// TiedTo - Non-zero when this register operand is tied to another register
+  /// operand. The encoding of this field is described in the block comment
+  /// before MachineInstr::tieOperands().
+  unsigned TiedTo : 4;
+
+  /// IsDef - True if this is a def, false if this is a use of the register.
+  /// This is only valid on register operands.
+  ///
+  unsigned IsDef : 1;
+
+  /// IsImp - True if this is an implicit def or use, false if it is explicit.
+  /// This is only valid on register operands.
+  ///
+  unsigned IsImp : 1;
+
+  /// IsDeadOrKill
+  /// For uses: IsKill - True if this instruction is the last use of the
+  /// register on this path through the function.
+  /// For defs: IsDead - True if this register is never used by a subsequent
+  /// instruction.
+  /// This is only valid on register operands.
+  unsigned IsDeadOrKill : 1;
+
+  /// See isRenamable().
+  unsigned IsRenamable : 1;
+
+  /// IsUndef - True if this register operand reads an "undef" value, i.e. the
+  /// read value doesn't matter.  This flag can be set on both use and def
+  /// operands.  On a sub-register def operand, it refers to the part of the
+  /// register that isn't written.  On a full-register def operand, it is a
+  /// noop.  See readsReg().
+  ///
+  /// This is only valid on registers.
+  ///
+  /// Note that an instruction may have multiple <undef> operands referring to
+  /// the same register.  In that case, the instruction may depend on those
+  /// operands reading the same don't-care value.  For example:
+  ///
+  ///   %1 = XOR undef %2, undef %2
+  ///
+  /// Any register can be used for %2, and its value doesn't matter, but
+  /// the two operands must be the same register.
+  ///
+  unsigned IsUndef : 1;
+
+  /// IsInternalRead - True if this operand reads a value that was defined
+  /// inside the same instruction or bundle.  This flag can be set on both use
+  /// and def operands.  On a sub-register def operand, it refers to the part
+  /// of the register that isn't written.  On a full-register def operand, it
+  /// is a noop.
+  ///
+  /// When this flag is set, the instruction bundle must contain at least one
+  /// other def of the register.  If multiple instructions in the bundle define
+  /// the register, the meaning is target-defined.
+  unsigned IsInternalRead : 1;
+
+  /// IsEarlyClobber - True if this MO_Register 'def' operand is written to
+  /// by the MachineInstr before all input registers are read.  This is used to
+  /// model the GCC inline asm '&' constraint modifier.
+  unsigned IsEarlyClobber : 1;
+
+  /// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
+  /// not a real instruction.  Such uses should be ignored during codegen.
+  unsigned IsDebug : 1;
+
+  /// SmallContents - This really should be part of the Contents union, but
+  /// lives out here so we can get a better packed struct.
+  /// MO_Register: Register number.
+  /// OffsetedInfo: Low bits of offset.
+  union {
+    unsigned RegNo;           // For MO_Register.
+    unsigned OffsetLo;        // Matches Contents.OffsetedInfo.OffsetHi.
+  } SmallContents;
+
+  /// ParentMI - This is the instruction that this operand is embedded into.
+  /// This is valid for all operand types, when the operand is in an instr.
+  MachineInstr *ParentMI;
+
+  /// Contents union - This contains the payload for the various operand types.
+  union {
+    MachineBasicBlock *MBB;  // For MO_MachineBasicBlock.
+    const ConstantFP *CFP;   // For MO_FPImmediate.
+    const ConstantInt *CI;   // For MO_CImmediate. Integers > 64bit.
+    int64_t ImmVal;          // For MO_Immediate.
+    const uint32_t *RegMask; // For MO_RegisterMask and MO_RegisterLiveOut.
+    const MDNode *MD;        // For MO_Metadata.
+    MCSymbol *Sym;           // For MO_MCSymbol.
+    unsigned CFIIndex;       // For MO_CFIIndex.
+    Intrinsic::ID IntrinsicID; // For MO_IntrinsicID.
+    unsigned Pred;           // For MO_Predicate
+
+    struct {                  // For MO_Register.
+      // Register number is in SmallContents.RegNo.
+      MachineOperand *Prev;   // Access list for register. See MRI.
+      MachineOperand *Next;
+    } Reg;
+
+    /// OffsetedInfo - This struct contains an object identifier and an
+    /// offset; together they represent an object plus an optional offset
+    /// from it.
+    struct {
+      union {
+        int Index;                // For MO_*Index - The index itself.
+        const char *SymbolName;   // For MO_ExternalSymbol.
+        const GlobalValue *GV;    // For MO_GlobalAddress.
+        const BlockAddress *BA;   // For MO_BlockAddress.
+      } Val;
+      // Low bits of offset are in SmallContents.OffsetLo.
+      int OffsetHi;               // An offset from the object, high 32 bits.
+    } OffsetedInfo;
+  } Contents;
+
+  explicit MachineOperand(MachineOperandType K)
+    : OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {
+    // Assert that the layout is what we expect. It's easy to grow this object.
+    static_assert(alignof(MachineOperand) <= alignof(int64_t),
+                  "MachineOperand shouldn't be more than 8 byte aligned");
+    static_assert(sizeof(Contents) <= 2 * sizeof(void *),
+                  "Contents should be at most two pointers");
+    static_assert(sizeof(MachineOperand) <=
+                      alignTo<alignof(int64_t)>(2 * sizeof(unsigned) +
+                                                3 * sizeof(void *)),
+                  "MachineOperand too big. Should be Kind, SmallContents, "
+                  "ParentMI, and Contents");
+  }
+
+public:
+  /// getType - Returns the MachineOperandType for this operand.
+  ///
+  MachineOperandType getType() const { return (MachineOperandType)OpKind; }
+
+  unsigned getTargetFlags() const {
+    return isReg() ? 0 : SubReg_TargetFlags;
+  }
+  void setTargetFlags(unsigned F) {
+    assert(!isReg() && "Register operands can't have target flags");
+    SubReg_TargetFlags = F;
+    assert(SubReg_TargetFlags == F && "Target flags out of range");
+  }
+  void addTargetFlag(unsigned F) {
+    assert(!isReg() && "Register operands can't have target flags");
+    SubReg_TargetFlags |= F;
+    assert((SubReg_TargetFlags & F) && "Target flags out of range");
+  }
+
+
+  /// getParent - Return the instruction that this operand belongs to.
+  ///
+  MachineInstr *getParent() { return ParentMI; }
+  const MachineInstr *getParent() const { return ParentMI; }
+
+  /// clearParent - Reset the parent pointer.
+  ///
+  /// The MachineOperand copy constructor also copies ParentMI, expecting the
+  /// original to be deleted. If a MachineOperand is ever stored outside a
+  /// MachineInstr, the parent pointer must be cleared.
+  ///
+  /// Never call clearParent() on an operand in a MachineInstr.
+  ///
+  void clearParent() { ParentMI = nullptr; }
+
+  /// Print a subreg index operand.
+  /// MO_Immediate operands can also be subreg indices. If that is the case,
+  /// the subreg index name will be printed. MachineInstr::isOperandSubregIdx
+  /// can be called to check this.
+  static void printSubRegIdx(raw_ostream &OS, uint64_t Index,
+                             const TargetRegisterInfo *TRI);
+
+  /// Print operand target flags.
+  static void printTargetFlags(raw_ostream& OS, const MachineOperand &Op);
+
+  /// Print a MCSymbol as an operand.
+  static void printSymbol(raw_ostream &OS, MCSymbol &Sym);
+
+  /// Print a stack object reference.
+  static void printStackObjectReference(raw_ostream &OS, unsigned FrameIndex,
+                                        bool IsFixed, StringRef Name);
+
+  /// Print the offset with explicit +/- signs.
+  static void printOperandOffset(raw_ostream &OS, int64_t Offset);
+
+  /// Print an IRSlotNumber.
+  static void printIRSlotNumber(raw_ostream &OS, int Slot);
+
+  /// Print the MachineOperand to \p os.
+  /// Providing a valid \p TRI and \p IntrinsicInfo results in more
+  /// target-specific printing. If \p TRI and \p IntrinsicInfo are null, the
+  /// function will try to pick them up from the parent.
+  void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr,
+             const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
+
+  /// More complex way of printing a MachineOperand.
+  /// \param TypeToPrint specifies the generic type to be printed on uses and
+  /// defs. It can be determined using MachineInstr::getTypeToPrint.
+  /// \param PrintDef - whether we want to print `def` on an operand which
+  /// isDef. Sometimes, if the operand is printed before '=', we don't print
+  /// `def`.
+  /// \param IsStandalone - whether we want a verbose output of the MO. This
+  /// prints extra information that can be easily inferred when printing the
+  /// whole function, but not when printing only a fragment of it.
+  /// \param ShouldPrintRegisterTies - whether we want to print register ties.
+  /// Sometimes they are easily determined by the instruction's descriptor
+  /// (MachineInstr::hasComplexRegisterTies can determine if it's needed).
+  /// \param TiedOperandIdx - if we need to print register ties this needs to
+  /// provide the index of the tied register. If not, it will be ignored.
+  /// \param TRI - provide more target-specific information to the printer.
+  /// Unlike the previous function, this one will not try to get the
+  /// information from its parent.
+  /// \param IntrinsicInfo - same as \p TRI.
+  void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
+             bool PrintDef, bool IsStandalone, bool ShouldPrintRegisterTies,
+             unsigned TiedOperandIdx, const TargetRegisterInfo *TRI,
+             const TargetIntrinsicInfo *IntrinsicInfo) const;
+
+  void dump() const;
+
+  //===--------------------------------------------------------------------===//
+  // Accessors that tell you what kind of MachineOperand you're looking at.
+  //===--------------------------------------------------------------------===//
+
+  /// isReg - Tests if this is a MO_Register operand.
+  bool isReg() const { return OpKind == MO_Register; }
+  /// isImm - Tests if this is a MO_Immediate operand.
+  bool isImm() const { return OpKind == MO_Immediate; }
+  /// isCImm - Test if this is a MO_CImmediate operand.
+  bool isCImm() const { return OpKind == MO_CImmediate; }
+  /// isFPImm - Tests if this is a MO_FPImmediate operand.
+  bool isFPImm() const { return OpKind == MO_FPImmediate; }
+  /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
+  bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
+  /// isFI - Tests if this is a MO_FrameIndex operand.
+  bool isFI() const { return OpKind == MO_FrameIndex; }
+  /// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
+  bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
+  /// isTargetIndex - Tests if this is a MO_TargetIndex operand.
+  bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
+  /// isJTI - Tests if this is a MO_JumpTableIndex operand.
+  bool isJTI() const { return OpKind == MO_JumpTableIndex; }
+  /// isGlobal - Tests if this is a MO_GlobalAddress operand.
+  bool isGlobal() const { return OpKind == MO_GlobalAddress; }
+  /// isSymbol - Tests if this is a MO_ExternalSymbol operand.
+  bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
+  /// isBlockAddress - Tests if this is a MO_BlockAddress operand.
+  bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
+  /// isRegMask - Tests if this is a MO_RegisterMask operand.
+  bool isRegMask() const { return OpKind == MO_RegisterMask; }
+  /// isRegLiveOut - Tests if this is a MO_RegisterLiveOut operand.
+  bool isRegLiveOut() const { return OpKind == MO_RegisterLiveOut; }
+  /// isMetadata - Tests if this is a MO_Metadata operand.
+  bool isMetadata() const { return OpKind == MO_Metadata; }
+  bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
+  bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
+  bool isIntrinsicID() const { return OpKind == MO_IntrinsicID; }
+  bool isPredicate() const { return OpKind == MO_Predicate; }
+  //===--------------------------------------------------------------------===//
+  // Accessors for Register Operands
+  //===--------------------------------------------------------------------===//
+
+  /// getReg - Returns the register number.
+  unsigned getReg() const {
+    assert(isReg() && "This is not a register operand!");
+    return SmallContents.RegNo;
+  }
+
+  unsigned getSubReg() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return SubReg_TargetFlags;
+  }
+
+  bool isUse() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return !IsDef;
+  }
+
+  bool isDef() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDef;
+  }
+
+  bool isImplicit() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsImp;
+  }
+
+  bool isDead() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDeadOrKill & IsDef;
+  }
+
+  bool isKill() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDeadOrKill & !IsDef;
+  }
+
+  bool isUndef() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsUndef;
+  }
+
+  /// isRenamable - Returns true if this register may be renamed, i.e. it does
+  /// not generate a value that is somehow read in a way that is not represented
+  /// by the Machine IR (e.g. to meet an ABI or ISA requirement).  This is only
+  /// valid on physical register operands.  Virtual registers are assumed to
+  /// always be renamable regardless of the value of this field.
+  ///
+  /// Operands that are renamable can freely be changed to any other register
+  /// that is a member of the register class returned by
+  /// MI->getRegClassConstraint().
+  ///
+  /// isRenamable can return false for several different reasons:
+  ///
+  /// - ABI constraints (since liveness is not always precisely modeled).  We
+  ///   conservatively handle these cases by setting all physical register
+  ///   operands that didn't start out as virtual regs to not be renamable.
+  ///   Also any physical register operands created after register allocation or
+  ///   whose register is changed after register allocation will not be
+  ///   renamable.  This state is tracked in the MachineOperand::IsRenamable
+  ///   bit.
+  ///
+  /// - Opcode/target constraints: for opcodes that have complex register class
+  ///   requirements (e.g. that depend on other operands/instructions), we set
+  ///   hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq in the machine opcode
+  ///   description.  Operands belonging to instructions with opcodes that are
+  ///   marked hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq return false from
+  ///   isRenamable().  Additionally, the AllowRegisterRenaming target property
+  ///   prevents any operands from being marked renamable for targets that don't
+  ///   have detailed opcode hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq
+  ///   values.
+  bool isRenamable() const;
+
+  bool isInternalRead() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsInternalRead;
+  }
+
+  bool isEarlyClobber() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsEarlyClobber;
+  }
+
+  bool isTied() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return TiedTo;
+  }
+
+  bool isDebug() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return IsDebug;
+  }
+
+  /// readsReg - Returns true if this operand reads the previous value of its
+  /// register.  A use operand with the <undef> flag set doesn't read its
+  /// register.  A sub-register def implicitly reads the other parts of the
+  /// register being redefined unless the <undef> flag is set.
+  ///
+  /// This refers to reading the register value from before the current
+  /// instruction or bundle. Internal bundle reads are not included.
+  bool readsReg() const {
+    assert(isReg() && "Wrong MachineOperand accessor");
+    return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Mutators for Register Operands
+  //===--------------------------------------------------------------------===//
+
+  /// Change the register this operand corresponds to.
+  ///
+  void setReg(unsigned Reg);
+
+  void setSubReg(unsigned subReg) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    SubReg_TargetFlags = subReg;
+    assert(SubReg_TargetFlags == subReg && "SubReg out of range");
+  }
+
+  /// substVirtReg - Substitute the current register with the virtual
+  /// subregister Reg:SubReg. Take any existing SubReg index into account,
+  /// using TargetRegisterInfo to compose the subreg indices if necessary.
+  /// Reg must be a virtual register, SubIdx can be 0.
+  ///
+  void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+  /// substPhysReg - Substitute the current register with the physical register
+  /// Reg, taking any existing SubReg into account. For instance,
+  /// substPhysReg(%eax) will change %reg1024:sub_8bit to %al.
+  ///
+  void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
+  void setIsUse(bool Val = true) { setIsDef(!Val); }
+
+  /// Change a def to a use, or a use to a def.
+  void setIsDef(bool Val = true);
+
+  void setImplicit(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsImp = Val;
+  }
+
+  void setIsKill(bool Val = true) {
+    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
+    assert((!Val || !isDebug()) && "Marking a debug operation as kill");
+    IsDeadOrKill = Val;
+  }
+
+  void setIsDead(bool Val = true) {
+    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
+    IsDeadOrKill = Val;
+  }
+
+  void setIsUndef(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsUndef = Val;
+  }
+
+  void setIsRenamable(bool Val = true);
+
+  void setIsInternalRead(bool Val = true) {
+    assert(isReg() && "Wrong MachineOperand mutator");
+    IsInternalRead = Val;
+  }
+
+  void setIsEarlyClobber(bool Val = true) {
+    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
+    IsEarlyClobber = Val;
+  }
+
+  void setIsDebug(bool Val = true) {
+    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
+    IsDebug = Val;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Accessors for various operand types.
+  //===--------------------------------------------------------------------===//
+
+  int64_t getImm() const {
+    assert(isImm() && "Wrong MachineOperand accessor");
+    return Contents.ImmVal;
+  }
+
+  const ConstantInt *getCImm() const {
+    assert(isCImm() && "Wrong MachineOperand accessor");
+    return Contents.CI;
+  }
+
+  const ConstantFP *getFPImm() const {
+    assert(isFPImm() && "Wrong MachineOperand accessor");
+    return Contents.CFP;
+  }
+
+  MachineBasicBlock *getMBB() const {
+    assert(isMBB() && "Wrong MachineOperand accessor");
+    return Contents.MBB;
+  }
+
+  int getIndex() const {
+    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+           "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.Index;
+  }
+
+  const GlobalValue *getGlobal() const {
+    assert(isGlobal() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.GV;
+  }
+
+  const BlockAddress *getBlockAddress() const {
+    assert(isBlockAddress() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.BA;
+  }
+
+  MCSymbol *getMCSymbol() const {
+    assert(isMCSymbol() && "Wrong MachineOperand accessor");
+    return Contents.Sym;
+  }
+
+  unsigned getCFIIndex() const {
+    assert(isCFIIndex() && "Wrong MachineOperand accessor");
+    return Contents.CFIIndex;
+  }
+
+  Intrinsic::ID getIntrinsicID() const {
+    assert(isIntrinsicID() && "Wrong MachineOperand accessor");
+    return Contents.IntrinsicID;
+  }
+
+  unsigned getPredicate() const {
+    assert(isPredicate() && "Wrong MachineOperand accessor");
+    return Contents.Pred;
+  }
+
+  /// Return the offset from the symbol in this operand. This always returns 0
+  /// for ExternalSymbol operands.
+  int64_t getOffset() const {
+    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+            isTargetIndex() || isBlockAddress()) &&
+           "Wrong MachineOperand accessor");
+    return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+           SmallContents.OffsetLo;
+  }
+
+  const char *getSymbolName() const {
+    assert(isSymbol() && "Wrong MachineOperand accessor");
+    return Contents.OffsetedInfo.Val.SymbolName;
+  }
+
+  /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
+  /// It is sometimes necessary to detach the register mask pointer from its
+  /// machine operand. This static method can be used for such detached bit
+  /// mask pointers.
+  static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
+    // See TargetRegisterInfo.h.
+    assert(PhysReg < (1u << 30) && "Not a physical register");
+    return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
+  }
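+
+  // A minimal sketch (illustrative): the mask packs one bit per physreg, 32
+  // per uint32_t word, with a set bit meaning "preserved", so a detached
+  // query looks like:
+  //
+  //   const uint32_t *Mask = MO.getRegMask(); // MO is a RegMask operand
+  //   bool Clobbered = MachineOperand::clobbersPhysReg(Mask, PhysReg);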
+
+  /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
+  bool clobbersPhysReg(unsigned PhysReg) const {
+    return clobbersPhysReg(getRegMask(), PhysReg);
+  }
+
+  /// getRegMask - Returns a bit mask of registers preserved by this RegMask
+  /// operand.
+  const uint32_t *getRegMask() const {
+    assert(isRegMask() && "Wrong MachineOperand accessor");
+    return Contents.RegMask;
+  }
+
+  /// getRegLiveOut - Returns a bit mask of live-out registers.
+  const uint32_t *getRegLiveOut() const {
+    assert(isRegLiveOut() && "Wrong MachineOperand accessor");
+    return Contents.RegMask;
+  }
+
+  const MDNode *getMetadata() const {
+    assert(isMetadata() && "Wrong MachineOperand accessor");
+    return Contents.MD;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Mutators for various operand types.
+  //===--------------------------------------------------------------------===//
+
+  void setImm(int64_t immVal) {
+    assert(isImm() && "Wrong MachineOperand mutator");
+    Contents.ImmVal = immVal;
+  }
+
+  void setFPImm(const ConstantFP *CFP) {
+    assert(isFPImm() && "Wrong MachineOperand mutator");
+    Contents.CFP = CFP;
+  }
+
+  void setOffset(int64_t Offset) {
+    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+            isTargetIndex() || isBlockAddress()) &&
+           "Wrong MachineOperand mutator");
+    SmallContents.OffsetLo = unsigned(Offset);
+    Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
+  }
+
+  void setIndex(int Idx) {
+    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+           "Wrong MachineOperand mutator");
+    Contents.OffsetedInfo.Val.Index = Idx;
+  }
+
+  void setMetadata(const MDNode *MD) {
+    assert(isMetadata() && "Wrong MachineOperand mutator");
+    Contents.MD = MD;
+  }
+
+  void setMBB(MachineBasicBlock *MBB) {
+    assert(isMBB() && "Wrong MachineOperand mutator");
+    Contents.MBB = MBB;
+  }
+
+  /// Sets value of register mask operand referencing Mask.  The
+  /// operand does not take ownership of the memory referenced by Mask, it must
+  /// remain valid for the lifetime of the operand. See CreateRegMask().
+  /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+  void setRegMask(const uint32_t *RegMaskPtr) {
+    assert(isRegMask() && "Wrong MachineOperand mutator");
+    Contents.RegMask = RegMaskPtr;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Other methods.
+  //===--------------------------------------------------------------------===//
+
+  /// Returns true if this operand is identical to the specified operand except
+  /// for liveness related flags (isKill, isUndef and isDead). Note that this
+  /// should stay in sync with the hash_value overload below.
+  bool isIdenticalTo(const MachineOperand &Other) const;
+
+  /// \brief MachineOperand hash_value overload.
+  ///
+  /// Note that this includes the same information in the hash that
+  /// isIdenticalTo uses for comparison. It is thus suited for use in hash
+  /// tables which use that function for equality comparisons only. This must
+  /// stay exactly in sync with isIdenticalTo above.
+  friend hash_code hash_value(const MachineOperand &MO);
+
+  /// ChangeToImmediate - Replace this operand with a new immediate operand of
+  /// the specified value.  If an operand is known to be an immediate already,
+  /// the setImm method should be used.
+  void ChangeToImmediate(int64_t ImmVal);
+
+  /// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
+  /// of the specified value.  If an operand is known to be an FP immediate
+  /// already, the setFPImm method should be used.
+  void ChangeToFPImmediate(const ConstantFP *FPImm);
+
+  /// ChangeToES - Replace this operand with a new external symbol operand.
+  void ChangeToES(const char *SymName, unsigned char TargetFlags = 0);
+
+  /// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
+  void ChangeToMCSymbol(MCSymbol *Sym);
+
+  /// Replace this operand with a frame index.
+  void ChangeToFrameIndex(int Idx);
+
+  /// Replace this operand with a target index.
+  void ChangeToTargetIndex(unsigned Idx, int64_t Offset,
+                           unsigned char TargetFlags = 0);
+
+  /// ChangeToRegister - Replace this operand with a new register operand of
+  /// the specified value.  If an operand is known to be a register already,
+  /// the setReg method should be used.
+  void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
+                        bool isKill = false, bool isDead = false,
+                        bool isUndef = false, bool isDebug = false);
+
+  //===--------------------------------------------------------------------===//
+  // Construction methods.
+  //===--------------------------------------------------------------------===//
+
+  static MachineOperand CreateImm(int64_t Val) {
+    MachineOperand Op(MachineOperand::MO_Immediate);
+    Op.setImm(Val);
+    return Op;
+  }
+
+  static MachineOperand CreateCImm(const ConstantInt *CI) {
+    MachineOperand Op(MachineOperand::MO_CImmediate);
+    Op.Contents.CI = CI;
+    return Op;
+  }
+
+  static MachineOperand CreateFPImm(const ConstantFP *CFP) {
+    MachineOperand Op(MachineOperand::MO_FPImmediate);
+    Op.Contents.CFP = CFP;
+    return Op;
+  }
+
+  static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
+                                  bool isKill = false, bool isDead = false,
+                                  bool isUndef = false,
+                                  bool isEarlyClobber = false,
+                                  unsigned SubReg = 0, bool isDebug = false,
+                                  bool isInternalRead = false,
+                                  bool isRenamable = false) {
+    assert(!(isDead && !isDef) && "Dead flag on non-def");
+    assert(!(isKill && isDef) && "Kill flag on def");
+    MachineOperand Op(MachineOperand::MO_Register);
+    Op.IsDef = isDef;
+    Op.IsImp = isImp;
+    Op.IsDeadOrKill = isKill | isDead;
+    Op.IsRenamable = isRenamable;
+    Op.IsUndef = isUndef;
+    Op.IsInternalRead = isInternalRead;
+    Op.IsEarlyClobber = isEarlyClobber;
+    Op.TiedTo = 0;
+    Op.IsDebug = isDebug;
+    Op.SmallContents.RegNo = Reg;
+    Op.Contents.Reg.Prev = nullptr;
+    Op.Contents.Reg.Next = nullptr;
+    Op.setSubReg(SubReg);
+    return Op;
+  }
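+
+  // A minimal sketch (illustrative; "VReg" is an assumed virtual register
+  // number): creating a matching def and use of the same register.
+  //
+  //   MachineOperand Def = MachineOperand::CreateReg(VReg, /*isDef=*/true);
+  //   MachineOperand Use = MachineOperand::CreateReg(VReg, /*isDef=*/false);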
+  static MachineOperand CreateMBB(MachineBasicBlock *MBB,
+                                  unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
+    Op.setMBB(MBB);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateFI(int Idx) {
+    MachineOperand Op(MachineOperand::MO_FrameIndex);
+    Op.setIndex(Idx);
+    return Op;
+  }
+  static MachineOperand CreateCPI(unsigned Idx, int Offset,
+                                  unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
+    Op.setIndex(Idx);
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
+                                          unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_TargetIndex);
+    Op.setIndex(Idx);
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateJTI(unsigned Idx, unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_JumpTableIndex);
+    Op.setIndex(Idx);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_GlobalAddress);
+    Op.Contents.OffsetedInfo.Val.GV = GV;
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateES(const char *SymName,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_ExternalSymbol);
+    Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
+    Op.setOffset(0); // Offset is always 0.
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
+                                 unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_BlockAddress);
+    Op.Contents.OffsetedInfo.Val.BA = BA;
+    Op.setOffset(Offset);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+  /// CreateRegMask - Creates a register mask operand referencing Mask.  The
+  /// operand does not take ownership of the memory referenced by Mask, it
+  /// must remain valid for the lifetime of the operand.
+  ///
+  /// A RegMask operand represents a set of non-clobbered physical registers
+  /// on an instruction that clobbers many registers, typically a call.  The
+  /// bit mask has a bit set for each physreg that is preserved by this
+  /// instruction, as described in the documentation for
+  /// TargetRegisterInfo::getCallPreservedMask().
+  ///
+  /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+  ///
+  static MachineOperand CreateRegMask(const uint32_t *Mask) {
+    assert(Mask && "Missing register mask");
+    MachineOperand Op(MachineOperand::MO_RegisterMask);
+    Op.Contents.RegMask = Mask;
+    return Op;
+  }
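+
+  // A minimal sketch (illustrative; "TRI", "MF", "CC" and the instruction
+  // builder "MIB" are assumed from a call-lowering context):
+  //
+  //   const uint32_t *Mask = TRI.getCallPreservedMask(MF, CC);
+  //   MIB.add(MachineOperand::CreateRegMask(Mask));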
+  static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
+    assert(Mask && "Missing live-out register mask");
+    MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
+    Op.Contents.RegMask = Mask;
+    return Op;
+  }
+  static MachineOperand CreateMetadata(const MDNode *Meta) {
+    MachineOperand Op(MachineOperand::MO_Metadata);
+    Op.Contents.MD = Meta;
+    return Op;
+  }
+
+  static MachineOperand CreateMCSymbol(MCSymbol *Sym,
+                                       unsigned char TargetFlags = 0) {
+    MachineOperand Op(MachineOperand::MO_MCSymbol);
+    Op.Contents.Sym = Sym;
+    Op.setOffset(0);
+    Op.setTargetFlags(TargetFlags);
+    return Op;
+  }
+
+  static MachineOperand CreateCFIIndex(unsigned CFIIndex) {
+    MachineOperand Op(MachineOperand::MO_CFIIndex);
+    Op.Contents.CFIIndex = CFIIndex;
+    return Op;
+  }
+
+  static MachineOperand CreateIntrinsicID(Intrinsic::ID ID) {
+    MachineOperand Op(MachineOperand::MO_IntrinsicID);
+    Op.Contents.IntrinsicID = ID;
+    return Op;
+  }
+
+  static MachineOperand CreatePredicate(unsigned Pred) {
+    MachineOperand Op(MachineOperand::MO_Predicate);
+    Op.Contents.Pred = Pred;
+    return Op;
+  }
+
+  friend class MachineInstr;
+  friend class MachineRegisterInfo;
+
+private:
+  // If this operand is currently a register operand, and if this is in a
+  // function, deregister the operand from the register's use/def list.
+  void removeRegFromUses();
+
+  /// Artificial kinds for DenseMap usage.
+  enum : unsigned char {
+    MO_Empty = MO_Last + 1,
+    MO_Tombstone,
+  };
+
+  friend struct DenseMapInfo<MachineOperand>;
+
+  //===--------------------------------------------------------------------===//
+  // Methods for handling register use/def lists.
+  //===--------------------------------------------------------------------===//
+
+  /// isOnRegUseList - Return true if this operand is on a register use/def
+  /// list or false if not.  This can only be called for register operands
+  /// that are part of a machine instruction.
+  bool isOnRegUseList() const {
+    assert(isReg() && "Can only add reg operand to use lists");
+    return Contents.Reg.Prev != nullptr;
+  }
+};
+
+template <> struct DenseMapInfo<MachineOperand> {
+  static MachineOperand getEmptyKey() {
+    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
+        MachineOperand::MO_Empty));
+  }
+  static MachineOperand getTombstoneKey() {
+    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
+        MachineOperand::MO_Tombstone));
+  }
+  static unsigned getHashValue(const MachineOperand &MO) {
+    return hash_value(MO);
+  }
+  static bool isEqual(const MachineOperand &LHS, const MachineOperand &RHS) {
+    if (LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
+                             MachineOperand::MO_Empty) ||
+        LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
+                             MachineOperand::MO_Tombstone))
+      return LHS.getType() == RHS.getType();
+    return LHS.isIdenticalTo(RHS);
+  }
+};
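+
+// With the DenseMapInfo specialization above, operands can key a DenseMap
+// directly. A minimal sketch (illustrative):
+//
+//   DenseMap<MachineOperand, unsigned> Seen;
+//   ++Seen[MachineOperand::CreateImm(42)];
+//
+// Note that, per isEqual above, liveness flags (isKill, isUndef, isDead) are
+// ignored by the key comparison, exactly as they are by isIdenticalTo.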
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand &MO) {
+  MO.print(OS);
+  return OS;
+}
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with IBM xlC compiler.
+hash_code hash_value(const MachineOperand &MO);
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
new file mode 100644
index 0000000..2fdefbe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
@@ -0,0 +1,224 @@
+///===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*----===//
+///
+///                     The LLVM Compiler Infrastructure
+///
+/// This file is distributed under the University of Illinois Open Source
+/// License. See LICENSE.TXT for details.
+///
+///===---------------------------------------------------------------------===//
+/// \file
+/// Optimization diagnostic interfaces for machine passes.  It's packaged as an
+/// analysis pass so that passes using this service become dependent on MBFI
+/// as well.  MBFI is used to compute the "hotness" of the diagnostic message.
+///
+///===---------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H
+#define LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H
+
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineBasicBlock;
+class MachineBlockFrequencyInfo;
+class MachineInstr;
+
+/// \brief Common features for diagnostics dealing with optimization remarks
+/// that are used by machine passes.
+class DiagnosticInfoMIROptimization : public DiagnosticInfoOptimizationBase {
+public:
+  DiagnosticInfoMIROptimization(enum DiagnosticKind Kind, const char *PassName,
+                                StringRef RemarkName,
+                                const DiagnosticLocation &Loc,
+                                const MachineBasicBlock *MBB)
+      : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
+                                       MBB->getParent()->getFunction(), Loc),
+        MBB(MBB) {}
+
+  /// MI-specific kinds of diagnostic Arguments.
+  struct MachineArgument : public DiagnosticInfoOptimizationBase::Argument {
+    /// Print an entire MachineInstr.
+    MachineArgument(StringRef Key, const MachineInstr &MI);
+  };
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() >= DK_FirstMachineRemark &&
+           DI->getKind() <= DK_LastMachineRemark;
+  }
+
+  const MachineBasicBlock *getBlock() const { return MBB; }
+
+private:
+  const MachineBasicBlock *MBB;
+};
+
+/// Diagnostic information for applied optimization remarks.
+class MachineOptimizationRemark : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass=, then the diagnostic will
+  /// be emitted.  \p RemarkName is a textual identifier for the remark.  \p
+  /// Loc is the debug location and \p MBB is the block that the optimization
+  /// operates in.
+  MachineOptimizationRemark(const char *PassName, StringRef RemarkName,
+                            const DiagnosticLocation &Loc,
+                            const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemark, PassName,
+                                      RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemark;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isPassedOptRemarkEnabled(getPassName());
+  }
+};
+
+/// Diagnostic information for missed-optimization remarks.
+class MachineOptimizationRemarkMissed : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-missed=, then the
+  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
+  /// remark.  \p Loc is the debug location and \p MBB is the block that the
+  /// optimization operates in.
+  MachineOptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
+                                  const DiagnosticLocation &Loc,
+                                  const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkMissed,
+                                      PassName, RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemarkMissed;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isMissedOptRemarkEnabled(getPassName());
+  }
+};
+
+/// Diagnostic information for optimization analysis remarks.
+class MachineOptimizationRemarkAnalysis : public DiagnosticInfoMIROptimization {
+public:
+  /// \p PassName is the name of the pass emitting this diagnostic. If this name
+  /// matches the regular expression given in -Rpass-analysis=, then the
+  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
+  /// remark.  \p Loc is the debug location and \p MBB is the block that the
+  /// optimization operates in.
+  MachineOptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
+                                    const DiagnosticLocation &Loc,
+                                    const MachineBasicBlock *MBB)
+      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkAnalysis,
+                                      PassName, RemarkName, Loc, MBB) {}
+
+  static bool classof(const DiagnosticInfo *DI) {
+    return DI->getKind() == DK_MachineOptimizationRemarkAnalysis;
+  }
+
+  /// \see DiagnosticInfoOptimizationBase::isEnabled.
+  bool isEnabled() const override {
+    const Function &Fn = getFunction();
+    LLVMContext &Ctx = Fn.getContext();
+    return Ctx.getDiagHandlerPtr()->isAnalysisRemarkEnabled(getPassName());
+  }
+};
+
+/// Extend llvm::ore:: with MI-specific helper names.
+namespace ore {
+using MNV = DiagnosticInfoMIROptimization::MachineArgument;
+}
+
+/// The optimization diagnostic interface.
+///
+/// It allows reporting when optimizations are performed and when they are not
+/// along with the reasons for it.  Hotness information of the corresponding
+/// code region can be included in the remark if DiagnosticsHotnessRequested is
+/// enabled in the LLVM context.
+class MachineOptimizationRemarkEmitter {
+public:
+  MachineOptimizationRemarkEmitter(MachineFunction &MF,
+                                   MachineBlockFrequencyInfo *MBFI)
+      : MF(MF), MBFI(MBFI) {}
+
+  /// Emit an optimization remark.
+  void emit(DiagnosticInfoOptimizationBase &OptDiag);
+
+  /// \brief Whether we allow for extra compile-time budget to perform more
+  /// analysis to be more informative.
+  ///
+  /// This is useful to enable additional missed optimizations to be reported
+  /// that are normally too noisy.  In this mode, we can use the extra analysis
+  /// (1) to filter trivial false positives or (2) to provide more context so
+  /// that non-trivial false positives can be quickly detected by the user.
+  bool allowExtraAnalysis(StringRef PassName) const {
+    return (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+            MF.getFunction().getContext()
+            .getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
+  }
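+
+  // A minimal sketch (illustrative; "MORE" is this emitter and DEBUG_TYPE the
+  // hosting pass's usual name macro):
+  //
+  //   if (MORE.allowExtraAnalysis(DEBUG_TYPE)) {
+  //     // run the more expensive analysis and attach its results
+  //   }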
+
+  /// \brief Take a lambda that returns a remark which will be emitted.  The
+  /// second argument only exists to restrict this overload to callables.
+  template <typename T>
+  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
+    // Avoid building the remark unless we know there are at least *some*
+    // remarks enabled. We can't currently check whether remarks are requested
+    // for the calling pass since that requires actually building the remark.
+
+    if (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+        MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+      auto R = RemarkBuilder();
+      emit((DiagnosticInfoOptimizationBase &)R);
+    }
+  }
+
+private:
+  MachineFunction &MF;
+
+  /// MBFI is only set if hotness is requested.
+  MachineBlockFrequencyInfo *MBFI;
+
+  /// Compute hotness from IR value (currently assumed to be a block) if PGO is
+  /// available.
+  Optional<uint64_t> computeHotness(const MachineBasicBlock &MBB);
+
+  /// Similar but use value from \p OptDiag and update hotness there.
+  void computeHotness(DiagnosticInfoMIROptimization &Remark);
+
+  /// \brief Only allow verbose messages if we know we're filtering by hotness
+  /// (BFI is only set in this case).
+  bool shouldEmitVerbose() { return MBFI != nullptr; }
+};
+
+/// The analysis pass.
+///
+/// Note that this pass shouldn't generally be marked as preserved by other
+/// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
+/// could be freed.
+class MachineOptimizationRemarkEmitterPass : public MachineFunctionPass {
+  std::unique_ptr<MachineOptimizationRemarkEmitter> ORE;
+
+public:
+  MachineOptimizationRemarkEmitterPass();
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineOptimizationRemarkEmitter &getORE() {
+    assert(ORE && "pass not run yet");
+    return *ORE;
+  }
+
+  static char ID;
+};
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h b/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h
new file mode 100644
index 0000000..3aba0bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachinePassRegistry.h
@@ -0,0 +1,142 @@
+//===- llvm/CodeGen/MachinePassRegistry.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the mechanics for machine function pass registries.  A
+// function pass registry (MachinePassRegistry) is auto-filled by the static
+// constructors of MachinePassRegistryNode.  Further, there is a command-line
+// parser (RegisterPassParser) which listens to each registry for additions
+// and deletions, so that the appropriate command-line option is updated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+using MachinePassCtor = void *(*)();
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryListener - Listener for additions to and removals from
+/// the registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryListener {
+  virtual void anchor();
+
+public:
+  MachinePassRegistryListener() = default;
+  virtual ~MachinePassRegistryListener() = default;
+
+  virtual void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) = 0;
+  virtual void NotifyRemove(StringRef N) = 0;
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryNode - Machine pass node stored in registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryNode {
+private:
+  MachinePassRegistryNode *Next = nullptr; // Next function pass in list.
+  StringRef Name;                       // Name of function pass.
+  StringRef Description;                // Description string.
+  MachinePassCtor Ctor;                 // Function pass creator.
+
+public:
+  MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
+      : Name(N), Description(D), Ctor(C) {}
+
+  // Accessors
+  MachinePassRegistryNode *getNext()      const { return Next; }
+  MachinePassRegistryNode **getNextAddress()    { return &Next; }
+  StringRef getName()                   const { return Name; }
+  StringRef getDescription()            const { return Description; }
+  MachinePassCtor getCtor()               const { return Ctor; }
+  void setNext(MachinePassRegistryNode *N)      { Next = N; }
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistry - Track the registration of machine passes.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistry {
+private:
+  MachinePassRegistryNode *List;        // List of registry nodes.
+  MachinePassCtor Default;              // Default function pass creator.
+  MachinePassRegistryListener *Listener; // Listener for list adds and removes.
+
+public:
+  // NO CONSTRUCTOR - we don't want static constructor ordering to mess
+  // with the registry.
+
+  // Accessors.
+  //
+  MachinePassRegistryNode *getList()                    { return List; }
+  MachinePassCtor getDefault()                          { return Default; }
+  void setDefault(MachinePassCtor C)                    { Default = C; }
+  void setDefault(StringRef Name);
+  void setListener(MachinePassRegistryListener *L)      { Listener = L; }
+
+  /// Add - Adds a function pass to the registration list.
+  ///
+  void Add(MachinePassRegistryNode *Node);
+
+  /// Remove - Removes a function pass from the registration list.
+  ///
+  void Remove(MachinePassRegistryNode *Node);
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterPassParser class - Handle the addition of new machine passes.
+///
+//===----------------------------------------------------------------------===//
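+/// A typical use pairs this parser with a registry-backed command-line option.
+/// A sketch assuming the RegisterScheduler registry from
+/// llvm/CodeGen/SchedulerRegistry.h:
+/// \code
+///   static cl::opt<RegisterScheduler::FunctionPassCtor, false,
+///                  RegisterPassParser<RegisterScheduler>>
+///       ISHeuristic("pre-RA-sched", cl::init(&createDefaultScheduler),
+///                   cl::desc("Instruction schedulers available"));
+/// \endcode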
+template<class RegistryClass>
+class RegisterPassParser : public MachinePassRegistryListener,
+                   public cl::parser<typename RegistryClass::FunctionPassCtor> {
+public:
+  RegisterPassParser(cl::Option &O)
+      : cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
+  ~RegisterPassParser() override { RegistryClass::setListener(nullptr); }
+
+  void initialize() {
+    cl::parser<typename RegistryClass::FunctionPassCtor>::initialize();
+
+    // Add existing passes to option.
+    for (RegistryClass *Node = RegistryClass::getList();
+         Node; Node = Node->getNext()) {
+      this->addLiteralOption(Node->getName(),
+                      (typename RegistryClass::FunctionPassCtor)Node->getCtor(),
+                             Node->getDescription());
+    }
+
+    // Make sure we listen for list changes.
+    RegistryClass::setListener(this);
+  }
+
+  // Implement the MachinePassRegistryListener callbacks.
+  void NotifyAdd(StringRef N, MachinePassCtor C, StringRef D) override {
+    this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
+  }
+  void NotifyRemove(StringRef N) override {
+    this->removeLiteralOption(N);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEPASSREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h b/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h
new file mode 100644
index 0000000..c6a4159
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachinePostDominators.h
@@ -0,0 +1,86 @@
+//=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post dominance information for
+// target-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+///
+/// MachinePostDominatorTree Class - Machine function pass that computes and
+/// owns the post-dominator tree for machine basic blocks.
+///
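+/// A short usage sketch from within a machine function pass (blocks A and B
+/// are assumptions for illustration):
+/// \code
+///   auto &MPDT = getAnalysis<MachinePostDominatorTree>();
+///   if (MPDT.dominates(A, B)) {
+///     // A post-dominates B.
+///   }
+/// \endcode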
+struct MachinePostDominatorTree : public MachineFunctionPass {
+private:
+  PostDomTreeBase<MachineBasicBlock> *DT;
+
+public:
+  static char ID;
+
+  MachinePostDominatorTree();
+
+  ~MachinePostDominatorTree() override;
+
+  FunctionPass *createMachinePostDominatorTreePass();
+
+  const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
+    return DT->getRoots();
+  }
+
+  MachineDomTreeNode *getRootNode() const {
+    return DT->getRootNode();
+  }
+
+  MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  bool dominates(const MachineDomTreeNode *A,
+                 const MachineDomTreeNode *B) const {
+    return DT->dominates(A, B);
+  }
+
+  bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
+    return DT->dominates(A, B);
+  }
+
+  bool properlyDominates(const MachineDomTreeNode *A,
+                         const MachineDomTreeNode *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  bool properlyDominates(const MachineBasicBlock *A,
+                         const MachineBasicBlock *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+                                                MachineBasicBlock *B) {
+    return DT->findNearestCommonDominator(A, B);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
+};
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h
new file mode 100644
index 0000000..8394b58
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegionInfo.h
@@ -0,0 +1,182 @@
+//===- llvm/CodeGen/MachineRegionInfo.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
+#define LLVM_CODEGEN_MACHINEREGIONINFO_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+struct MachinePostDominatorTree;
+class MachineRegion;
+class MachineRegionNode;
+class MachineRegionInfo;
+
+template <> struct RegionTraits<MachineFunction> {
+  using FuncT = MachineFunction;
+  using BlockT = MachineBasicBlock;
+  using RegionT = MachineRegion;
+  using RegionNodeT = MachineRegionNode;
+  using RegionInfoT = MachineRegionInfo;
+  using DomTreeT = MachineDominatorTree;
+  using DomTreeNodeT = MachineDomTreeNode;
+  using PostDomTreeT = MachinePostDominatorTree;
+  using DomFrontierT = MachineDominanceFrontier;
+  using InstT = MachineInstr;
+  using LoopT = MachineLoop;
+  using LoopInfoT = MachineLoopInfo;
+
+  static unsigned getNumSuccessors(MachineBasicBlock *BB) {
+    return BB->succ_size();
+  }
+};
+
+class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
+public:
+  inline MachineRegionNode(MachineRegion *Parent, MachineBasicBlock *Entry,
+                           bool isSubRegion = false)
+      : RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry,
+                                                      isSubRegion) {}
+
+  bool operator==(const MachineRegion &RN) const {
+    return this == reinterpret_cast<const MachineRegionNode *>(&RN);
+  }
+};
+
+class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
+public:
+  MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
+                MachineRegionInfo *RI, MachineDominatorTree *DT,
+                MachineRegion *Parent = nullptr);
+  ~MachineRegion();
+
+  bool operator==(const MachineRegionNode &RN) const {
+    return &RN == reinterpret_cast<const MachineRegionNode *>(this);
+  }
+};
+
+class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
+public:
+  explicit MachineRegionInfo();
+  ~MachineRegionInfo() override;
+
+  // updateStatistics - Update statistic about created regions.
+  void updateStatistics(MachineRegion *R) final;
+
+  void recalculate(MachineFunction &F, MachineDominatorTree *DT,
+                   MachinePostDominatorTree *PDT, MachineDominanceFrontier *DF);
+};
+
+class MachineRegionInfoPass : public MachineFunctionPass {
+  MachineRegionInfo RI;
+
+public:
+  static char ID;
+
+  explicit MachineRegionInfoPass();
+  ~MachineRegionInfoPass() override;
+
+  MachineRegionInfo &getRegionInfo() { return RI; }
+
+  const MachineRegionInfo &getRegionInfo() const { return RI; }
+
+  /// @name MachineFunctionPass interface
+  //@{
+  bool runOnMachineFunction(MachineFunction &F) override;
+  void releaseMemory() override;
+  void verifyAnalysis() const override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  void print(raw_ostream &OS, const Module *) const override;
+  void dump() const;
+  //@}
+};
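+
+// A hedged sketch of querying region info from another machine function pass
+// (the pass setup and MBB are assumptions for illustration):
+//
+//   auto &RI = getAnalysis<MachineRegionInfoPass>().getRegionInfo();
+//   if (MachineRegion *R = RI.getRegionFor(&MBB)) {
+//     // ... inspect the innermost region containing MBB ...
+//   }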
+
+template <>
+template <>
+inline MachineBasicBlock *
+RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>()
+    const {
+  assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
+  return getEntry();
+}
+
+template <>
+template <>
+inline MachineRegion *
+RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>()
+    const {
+  assert(isSubRegion() && "This is not a subregion RegionNode!");
+  auto Unconst =
+      const_cast<RegionNodeBase<RegionTraits<MachineFunction>> *>(this);
+  return reinterpret_cast<MachineRegion *>(Unconst);
+}
+
+RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
+RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock,
+                      MachineRegion);
+
+RegionGraphTraits(MachineRegion, MachineRegionNode);
+RegionGraphTraits(const MachineRegion, const MachineRegionNode);
+
+template <>
+struct GraphTraits<MachineRegionInfo *>
+    : public GraphTraits<FlatIt<MachineRegionNode *>> {
+  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
+                                     false, GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(MachineRegionInfo *RI) {
+    return GraphTraits<FlatIt<MachineRegion *>>::getEntryNode(
+        RI->getTopLevelRegion());
+  }
+
+  static nodes_iterator nodes_begin(MachineRegionInfo *RI) {
+    return nodes_iterator::begin(getEntryNode(RI));
+  }
+
+  static nodes_iterator nodes_end(MachineRegionInfo *RI) {
+    return nodes_iterator::end(getEntryNode(RI));
+  }
+};
+
+template <>
+struct GraphTraits<MachineRegionInfoPass *>
+    : public GraphTraits<MachineRegionInfo *> {
+  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
+                                     false, GraphTraits<FlatIt<NodeRef>>>;
+
+  static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::getEntryNode(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_begin(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::nodes_begin(&RI->getRegionInfo());
+  }
+
+  static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
+    return GraphTraits<MachineRegionInfo *>::nodes_end(&RI->getRegionInfo());
+  }
+};
+
+extern template class RegionBase<RegionTraits<MachineFunction>>;
+extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
+extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEREGIONINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
new file mode 100644
index 0000000..b0dfd02
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -0,0 +1,1197 @@
+//===- llvm/CodeGen/MachineRegisterInfo.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
+#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class PSetIterator;
+
+/// Convenient type to represent either a register class or a register bank.
+using RegClassOrRegBank =
+    PointerUnion<const TargetRegisterClass *, const RegisterBank *>;
+
+/// MachineRegisterInfo - Keep track of information for virtual and physical
+/// registers, including vreg register classes, use/def chains for registers,
+/// etc.
+class MachineRegisterInfo {
+public:
+  class Delegate {
+    virtual void anchor();
+
+  public:
+    virtual ~Delegate() = default;
+
+    virtual void MRI_NoteNewVirtualRegister(unsigned Reg) = 0;
+  };
+
+private:
+  MachineFunction *MF;
+  Delegate *TheDelegate = nullptr;
+
+  /// True if subregister liveness is tracked.
+  const bool TracksSubRegLiveness;
+
+  /// VRegInfo - Information we keep for each virtual register.
+  ///
+  /// Each element in this list contains the register class of the vreg and the
+  /// start of the use/def list for the register.
+  IndexedMap<std::pair<RegClassOrRegBank, MachineOperand *>,
+             VirtReg2IndexFunctor>
+      VRegInfo;
+
+  /// Map for recovering vreg name from vreg number.
+  /// This map is used by the MIR Printer.
+  IndexedMap<std::string, VirtReg2IndexFunctor> VReg2Name;
+
+  /// StringSet that is used to unique vreg names.
+  StringSet<> VRegNames;
+
+  /// The flag is true upon \p UpdatedCSRs initialization
+  /// and false otherwise.
+  bool IsUpdatedCSRsInitialized;
+
+  /// Contains the updated callee saved register list.
+  /// As opposed to the static list defined in register info,
+  /// all registers that were disabled are removed from the list.
+  SmallVector<MCPhysReg, 16> UpdatedCSRs;
+
+  /// RegAllocHints - This vector records register allocation hints for
+  /// virtual registers. For each virtual register, it keeps a pair of hint
+  /// type and hints vector making up the allocation hints. Only the first
+  /// hint may be target specific, and in that case this is reflected by the
+  /// first member of the pair being non-zero. If the hinted register is
+  /// virtual, it means the allocator should prefer the physical register
+  /// allocated to it if any.
+  IndexedMap<std::pair<unsigned, SmallVector<unsigned, 4>>,
+             VirtReg2IndexFunctor> RegAllocHints;
+
+  /// PhysRegUseDefLists - This is an array of the head of the use/def list for
+  /// physical registers.
+  std::unique_ptr<MachineOperand *[]> PhysRegUseDefLists;
+
+  /// getRegUseDefListHead - Return the head pointer for the register use/def
+  /// list for the specified virtual or physical register.
+  MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
+    if (TargetRegisterInfo::isVirtualRegister(RegNo))
+      return VRegInfo[RegNo].second;
+    return PhysRegUseDefLists[RegNo];
+  }
+
+  MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
+    if (TargetRegisterInfo::isVirtualRegister(RegNo))
+      return VRegInfo[RegNo].second;
+    return PhysRegUseDefLists[RegNo];
+  }
+
+  /// Get the next element in the use-def chain.
+  static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
+    assert(MO && MO->isReg() && "This is not a register operand!");
+    return MO->Contents.Reg.Next;
+  }
+
+  /// UsedPhysRegMask - Additional used physregs including aliases.
+  /// This bit vector represents all the registers clobbered by function calls.
+  BitVector UsedPhysRegMask;
+
+  /// ReservedRegs - This is a bit vector of reserved registers.  The target
+  /// may change its mind about which registers should be reserved.  This
+  /// vector is the frozen set of reserved registers when register allocation
+  /// started.
+  BitVector ReservedRegs;
+
+  using VRegToTypeMap = DenseMap<unsigned, LLT>;
+  /// Map generic virtual registers to their actual size.
+  mutable std::unique_ptr<VRegToTypeMap> VRegToType;
+
+  /// Keep track of the physical registers that are live in to the function.
+  /// Live in values are typically arguments in registers.  LiveIn values are
+  /// allowed to have virtual registers associated with them, stored in the
+  /// second element.
+  std::vector<std::pair<unsigned, unsigned>> LiveIns;
+
+public:
+  explicit MachineRegisterInfo(MachineFunction *MF);
+  MachineRegisterInfo(const MachineRegisterInfo &) = delete;
+  MachineRegisterInfo &operator=(const MachineRegisterInfo &) = delete;
+
+  const TargetRegisterInfo *getTargetRegisterInfo() const {
+    return MF->getSubtarget().getRegisterInfo();
+  }
+
+  void resetDelegate(Delegate *delegate) {
+    // Ensure another delegate does not take over unless the current
+    // delegate first unattaches itself. If we ever need to multicast
+    // notifications, we will need to change to using a list.
+    assert(TheDelegate == delegate &&
+           "Only the current delegate can perform reset!");
+    TheDelegate = nullptr;
+  }
+
+  void setDelegate(Delegate *delegate) {
+    assert(delegate && !TheDelegate &&
+           "Attempted to set delegate to null, or to change it without "
+           "first resetting it!");
+
+    TheDelegate = delegate;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Function State
+  //===--------------------------------------------------------------------===//
+
+  // isSSA - Returns true when the machine function is in SSA form. Early
+  // passes require the machine function to be in SSA form where every virtual
+  // register has a single defining instruction.
+  //
+  // The TwoAddressInstructionPass and PHIElimination passes take the machine
+  // function out of SSA form when they introduce multiple defs per virtual
+  // register.
+  bool isSSA() const {
+    return MF->getProperties().hasProperty(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  // leaveSSA - Indicates that the machine function is no longer in SSA form.
+  void leaveSSA() {
+    MF->getProperties().reset(MachineFunctionProperties::Property::IsSSA);
+  }
+
+  /// tracksLiveness - Returns true when tracking register liveness accurately.
+  /// (see MachineFunctionProperties::Property description for details)
+  bool tracksLiveness() const {
+    return MF->getProperties().hasProperty(
+        MachineFunctionProperties::Property::TracksLiveness);
+  }
+
+  /// invalidateLiveness - Indicates that register liveness is no longer being
+  /// tracked accurately.
+  ///
+  /// This should be called by late passes that invalidate the liveness
+  /// information.
+  void invalidateLiveness() {
+    MF->getProperties().reset(
+        MachineFunctionProperties::Property::TracksLiveness);
+  }
+
+  /// Returns true if liveness for register class @p RC should be tracked at
+  /// the subregister level.
+  bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const {
+    return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
+  }
+  bool shouldTrackSubRegLiveness(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Must pass a VReg");
+    return shouldTrackSubRegLiveness(*getRegClass(VReg));
+  }
+  bool subRegLivenessEnabled() const {
+    return TracksSubRegLiveness;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Register Info
+  //===--------------------------------------------------------------------===//
+
+  /// Returns true if the updated CSR list was initialized and false otherwise.
+  bool isUpdatedCSRsInitialized() const { return IsUpdatedCSRsInitialized; }
+
+  /// Disables the register from the list of CSRs.
+  /// I.e. the register will not appear as part of the CSR mask.
+  /// \see UpdatedCalleeSavedRegs.
+  void disableCalleeSavedRegister(unsigned Reg);
+
+  /// Returns list of callee saved registers.
+  /// The function returns the updated CSR list (after taking into account
+  /// registers that are disabled from the CSR list).
+  const MCPhysReg *getCalleeSavedRegs() const;
+
+  /// Sets the updated Callee Saved Registers list.
+  /// Notice that it will override any previously disabled/saved CSRs.
+  void setCalleeSavedRegs(ArrayRef<MCPhysReg> CSRs);
+
+  // Strictly for use by MachineInstr.cpp.
+  void addRegOperandToUseList(MachineOperand *MO);
+
+  // Strictly for use by MachineInstr.cpp.
+  void removeRegOperandFromUseList(MachineOperand *MO);
+
+  // Strictly for use by MachineInstr.cpp.
+  void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
+
+  /// Verify the sanity of the use list for Reg.
+  void verifyUseList(unsigned Reg) const;
+
+  /// Verify the use list of all registers.
+  void verifyUseLists() const;
+
+  /// reg_begin/reg_end - Provide iteration support to walk over all definitions
+  /// and uses of a register within the MachineFunction that corresponds to this
+  /// MachineRegisterInfo object.
+  template<bool Uses, bool Defs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_iterator;
+  template<bool Uses, bool Defs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_instr_iterator;
+
+  // Make it a friend so it can access getNextOperandForReg().
+  template<bool, bool, bool, bool, bool, bool>
+    friend class defusechain_iterator;
+  template<bool, bool, bool, bool, bool, bool>
+    friend class defusechain_instr_iterator;
+
+  /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
+  /// register.
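+  ///
+  /// These are most conveniently consumed through the range helpers, e.g. a
+  /// sketch assuming a register \p Reg:
+  /// \code
+  ///   for (MachineOperand &MO : MRI.reg_operands(Reg)) {
+  ///     MachineInstr &MI = *MO.getParent();
+  ///     // ... inspect MI/MO ...
+  ///   }
+  /// \endcode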
+  using reg_iterator =
+      defusechain_iterator<true, true, false, true, false, false>;
+  reg_iterator reg_begin(unsigned RegNo) const {
+    return reg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_iterator reg_end() { return reg_iterator(nullptr); }
+
+  inline iterator_range<reg_iterator>  reg_operands(unsigned Reg) const {
+    return make_range(reg_begin(Reg), reg_end());
+  }
+
+  /// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
+  /// of the specified register, stepping by MachineInstr.
+  using reg_instr_iterator =
+      defusechain_instr_iterator<true, true, false, false, true, false>;
+  reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
+    return reg_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_instr_iterator reg_instr_end() {
+    return reg_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_instr_iterator>
+  reg_instructions(unsigned Reg) const {
+    return make_range(reg_instr_begin(Reg), reg_instr_end());
+  }
+
+  /// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
+  /// of the specified register, stepping by bundle.
+  using reg_bundle_iterator =
+      defusechain_instr_iterator<true, true, false, false, false, true>;
+  reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
+    return reg_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_bundle_iterator reg_bundle_end() {
+    return reg_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
+    return make_range(reg_bundle_begin(Reg), reg_bundle_end());
+  }
+
+  /// reg_empty - Return true if there are no instructions using or defining the
+  /// specified register (it may be live-in).
+  bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
+
+  /// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
+  /// of the specified register, skipping those marked as Debug.
+  using reg_nodbg_iterator =
+      defusechain_iterator<true, true, true, true, false, false>;
+  reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
+    return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_nodbg_iterator reg_nodbg_end() {
+    return reg_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_nodbg_iterator>
+  reg_nodbg_operands(unsigned Reg) const {
+    return make_range(reg_nodbg_begin(Reg), reg_nodbg_end());
+  }
+
+  /// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
+  /// all defs and uses of the specified register, stepping by MachineInstr,
+  /// skipping those marked as Debug.
+  using reg_instr_nodbg_iterator =
+      defusechain_instr_iterator<true, true, true, false, true, false>;
+  reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
+    return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
+    return reg_instr_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_instr_nodbg_iterator>
+  reg_nodbg_instructions(unsigned Reg) const {
+    return make_range(reg_instr_nodbg_begin(Reg), reg_instr_nodbg_end());
+  }
+
+  /// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
+  /// all defs and uses of the specified register, stepping by bundle,
+  /// skipping those marked as Debug.
+  using reg_bundle_nodbg_iterator =
+      defusechain_instr_iterator<true, true, true, false, false, true>;
+  reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
+    return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
+    return reg_bundle_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<reg_bundle_nodbg_iterator>
+  reg_nodbg_bundles(unsigned Reg) const {
+    return make_range(reg_bundle_nodbg_begin(Reg), reg_bundle_nodbg_end());
+  }
+
+  /// reg_nodbg_empty - Return true if the only instructions using or defining
+  /// Reg are Debug instructions.
+  bool reg_nodbg_empty(unsigned RegNo) const {
+    return reg_nodbg_begin(RegNo) == reg_nodbg_end();
+  }
+
+  /// def_iterator/def_begin/def_end - Walk all defs of the specified register.
+  using def_iterator =
+      defusechain_iterator<false, true, false, true, false, false>;
+  def_iterator def_begin(unsigned RegNo) const {
+    return def_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_iterator def_end() { return def_iterator(nullptr); }
+
+  inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
+    return make_range(def_begin(Reg), def_end());
+  }
+
+  /// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
+  /// specified register, stepping by MachineInstr.
+  using def_instr_iterator =
+      defusechain_instr_iterator<false, true, false, false, true, false>;
+  def_instr_iterator def_instr_begin(unsigned RegNo) const {
+    return def_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_instr_iterator def_instr_end() {
+    return def_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<def_instr_iterator>
+  def_instructions(unsigned Reg) const {
+    return make_range(def_instr_begin(Reg), def_instr_end());
+  }
+
+  /// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
+  /// specified register, stepping by bundle.
+  using def_bundle_iterator =
+      defusechain_instr_iterator<false, true, false, false, false, true>;
+  def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
+    return def_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static def_bundle_iterator def_bundle_end() {
+    return def_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
+    return make_range(def_bundle_begin(Reg), def_bundle_end());
+  }
+
+  /// def_empty - Return true if there are no instructions defining the
+  /// specified register (it may be live-in).
+  bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
+
+  StringRef getVRegName(unsigned Reg) const {
+    return VReg2Name.inBounds(Reg) ? StringRef(VReg2Name[Reg]) : "";
+  }
+
+  void insertVRegByName(StringRef Name, unsigned Reg) {
+    assert((Name.empty() || VRegNames.find(Name) == VRegNames.end()) &&
+           "Named VRegs Must be Unique.");
+    if (!Name.empty()) {
+      VRegNames.insert(Name);
+      VReg2Name.grow(Reg);
+      VReg2Name[Reg] = Name.str();
+    }
+  }
+
+  /// Return true if there is exactly one operand defining the specified
+  /// register.
+  bool hasOneDef(unsigned RegNo) const {
+    def_iterator DI = def_begin(RegNo);
+    if (DI == def_end())
+      return false;
+    return ++DI == def_end();
+  }
+
+  /// use_iterator/use_begin/use_end - Walk all uses of the specified register.
+  using use_iterator =
+      defusechain_iterator<true, false, false, true, false, false>;
+  use_iterator use_begin(unsigned RegNo) const {
+    return use_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_iterator use_end() { return use_iterator(nullptr); }
+
+  inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
+    return make_range(use_begin(Reg), use_end());
+  }
+
+  /// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
+  /// specified register, stepping by MachineInstr.
+  using use_instr_iterator =
+      defusechain_instr_iterator<true, false, false, false, true, false>;
+  use_instr_iterator use_instr_begin(unsigned RegNo) const {
+    return use_instr_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_instr_iterator use_instr_end() {
+    return use_instr_iterator(nullptr);
+  }
+
+  inline iterator_range<use_instr_iterator>
+  use_instructions(unsigned Reg) const {
+    return make_range(use_instr_begin(Reg), use_instr_end());
+  }
+
+  /// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
+  /// specified register, stepping by bundle.
+  using use_bundle_iterator =
+      defusechain_instr_iterator<true, false, false, false, false, true>;
+  use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
+    return use_bundle_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_bundle_iterator use_bundle_end() {
+    return use_bundle_iterator(nullptr);
+  }
+
+  inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
+    return make_range(use_bundle_begin(Reg), use_bundle_end());
+  }
+
+  /// use_empty - Return true if there are no instructions using the specified
+  /// register.
+  bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
+
+  /// hasOneUse - Return true if there is exactly one instruction using the
+  /// specified register.
+  bool hasOneUse(unsigned RegNo) const {
+    use_iterator UI = use_begin(RegNo);
+    if (UI == use_end())
+      return false;
+    return ++UI == use_end();
+  }
+
+  /// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
+  /// specified register, skipping those marked as Debug.
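+  ///
+  /// A sketch of counting the real (non-debug) uses of a register (Reg and
+  /// NumRealUses are assumptions for illustration):
+  /// \code
+  ///   unsigned NumRealUses = 0;
+  ///   for (MachineOperand &MO : MRI.use_nodbg_operands(Reg))
+  ///     if (MO.readsReg())
+  ///       ++NumRealUses;
+  /// \endcode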
+  using use_nodbg_iterator =
+      defusechain_iterator<true, false, true, true, false, false>;
+  use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
+    return use_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_nodbg_iterator use_nodbg_end() {
+    return use_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_nodbg_iterator>
+  use_nodbg_operands(unsigned Reg) const {
+    return make_range(use_nodbg_begin(Reg), use_nodbg_end());
+  }
+
+  /// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
+  /// all uses of the specified register, stepping by MachineInstr, skipping
+  /// those marked as Debug.
+  using use_instr_nodbg_iterator =
+      defusechain_instr_iterator<true, false, true, false, true, false>;
+  use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
+    return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_instr_nodbg_iterator use_instr_nodbg_end() {
+    return use_instr_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_instr_nodbg_iterator>
+  use_nodbg_instructions(unsigned Reg) const {
+    return make_range(use_instr_nodbg_begin(Reg), use_instr_nodbg_end());
+  }
+
+  /// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
+  /// all uses of the specified register, stepping by bundle, skipping
+  /// those marked as Debug.
+  using use_bundle_nodbg_iterator =
+      defusechain_instr_iterator<true, false, true, false, false, true>;
+  use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
+    return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+  }
+  static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
+    return use_bundle_nodbg_iterator(nullptr);
+  }
+
+  inline iterator_range<use_bundle_nodbg_iterator>
+  use_nodbg_bundles(unsigned Reg) const {
+    return make_range(use_bundle_nodbg_begin(Reg), use_bundle_nodbg_end());
+  }
+
+  /// use_nodbg_empty - Return true if there are no non-Debug instructions
+  /// using the specified register.
+  bool use_nodbg_empty(unsigned RegNo) const {
+    return use_nodbg_begin(RegNo) == use_nodbg_end();
+  }
+
+  /// hasOneNonDBGUse - Return true if there is exactly one non-Debug
+  /// instruction using the specified register.
+  bool hasOneNonDBGUse(unsigned RegNo) const;
+
+  /// replaceRegWith - Replace all instances of FromReg with ToReg in the
+  /// machine function.  This is like llvm-level X->replaceAllUsesWith(Y),
+  /// except that it also changes any definitions of the register as well.
+  ///
+  /// Note that it is usually necessary to first constrain ToReg's register
+  /// class and register bank to match the FromReg constraints using one of the
+  /// methods:
+  ///
+  ///   constrainRegClass(ToReg, getRegClass(FromReg))
+  ///   constrainRegAttrs(ToReg, FromReg)
+  ///   RegisterBankInfo::constrainGenericRegister(ToReg,
+  ///       *MRI.getRegClass(FromReg), MRI)
+  ///
+  /// These functions will return a falsy result if the virtual registers have
+  /// incompatible constraints.
+  ///
+  /// Note that if ToReg is a physical register the function will replace and
+  /// apply sub registers to ToReg in order to obtain a final/proper physical
+  /// register.
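+  ///
+  /// A hedged sketch of the constrain-then-replace sequence for two virtual
+  /// registers (FromReg and ToReg are assumptions):
+  /// \code
+  ///   if (MRI.constrainRegClass(ToReg, MRI.getRegClass(FromReg)))
+  ///     MRI.replaceRegWith(FromReg, ToReg);
+  /// \endcode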
+  void replaceRegWith(unsigned FromReg, unsigned ToReg);
+
+  /// getVRegDef - Return the machine instr that defines the specified virtual
+  /// register or null if none is found.  This assumes that the code is in SSA
+  /// form, so there should only be one definition.
+  MachineInstr *getVRegDef(unsigned Reg) const;
+
+  /// getUniqueVRegDef - Return the unique machine instr that defines the
+  /// specified virtual register or null if none is found.  If there are
+  /// multiple definitions or no definition, return null.
+  MachineInstr *getUniqueVRegDef(unsigned Reg) const;
+
+  /// clearKillFlags - Iterate over all the uses of the given register and
+  /// clear the kill flag from the MachineOperand. This function is used by
+  /// optimization passes which extend register lifetimes and need only
+  /// preserve conservative kill flag information.
+  void clearKillFlags(unsigned Reg) const;
+
+  void dumpUses(unsigned RegNo) const;
+
+  /// Returns true if PhysReg is unallocatable and constant throughout the
+  /// function. Writing to a constant register has no effect.
+  bool isConstantPhysReg(unsigned PhysReg) const;
+
+  /// Returns true if either isConstantPhysReg or TRI->isCallerPreservedPhysReg
+  /// returns true. This is a utility member function.
+  bool isCallerPreservedOrConstPhysReg(unsigned PhysReg) const;
+
+  /// Get an iterator over the pressure sets affected by the given physical or
+  /// virtual register. If RegUnit is physical, it must be a register unit (from
+  /// MCRegUnitIterator).
+  PSetIterator getPressureSets(unsigned RegUnit) const;
+
+  //===--------------------------------------------------------------------===//
+  // Virtual Register Info
+  //===--------------------------------------------------------------------===//
+
+  /// Return the register class of the specified virtual register.
+  /// This shouldn't be used directly unless \p Reg has a register class.
+  /// \see getRegClassOrNull when this might happen.
+  const TargetRegisterClass *getRegClass(unsigned Reg) const {
+    assert(VRegInfo[Reg].first.is<const TargetRegisterClass *>() &&
+           "Register class not set, wrong accessor");
+    return VRegInfo[Reg].first.get<const TargetRegisterClass *>();
+  }
+
+  /// Return the register class of \p Reg, or null if Reg has not been assigned
+  /// a register class yet.
+  ///
+  /// \note A null register class can only happen when these two
+  /// conditions are met:
+  /// 1. Generic virtual registers are created.
+  /// 2. The machine function has not completely been through the
+  ///    instruction selection process.
+  /// Neither of these conditions is possible without GlobalISel for now.
+  /// In other words, if GlobalISel is not used or if the query happens after
+  /// the select pass, using getRegClass is safe.
+  const TargetRegisterClass *getRegClassOrNull(unsigned Reg) const {
+    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
+    return Val.dyn_cast<const TargetRegisterClass *>();
+  }
+
+  /// Return the register bank of \p Reg, or null if Reg has not been assigned
+  /// a register bank or has been assigned a register class.
+  /// \note It is possible to get the register bank from the register class via
+  /// RegisterBankInfo::getRegBankFromRegClass.
+  const RegisterBank *getRegBankOrNull(unsigned Reg) const {
+    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
+    return Val.dyn_cast<const RegisterBank *>();
+  }
+
+  /// Return the register bank or register class of \p Reg.
+  /// \note Before the register bank gets assigned (i.e., before the
+  /// RegBankSelect pass) \p Reg may not have either.
+  const RegClassOrRegBank &getRegClassOrRegBank(unsigned Reg) const {
+    return VRegInfo[Reg].first;
+  }
+
+  /// setRegClass - Set the register class of the specified virtual register.
+  void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
+
+  /// Set the register bank to \p RegBank for \p Reg.
+  void setRegBank(unsigned Reg, const RegisterBank &RegBank);
+
+  void setRegClassOrRegBank(unsigned Reg,
+                            const RegClassOrRegBank &RCOrRB) {
+    VRegInfo[Reg].first = RCOrRB;
+  }
+
+  /// constrainRegClass - Constrain the register class of the specified virtual
+  /// register to be a common subclass of RC and the current register class,
+  /// but only if the new class has at least MinNumRegs registers.  Return the
+  /// new register class, or NULL if no such class exists.
+  /// This should only be used when the constraint is known to be trivial, like
+  /// GR32 -> GR32_NOSP. Beware of increasing register pressure.
+  ///
+  /// \note Assumes that the register has a register class assigned.
+  /// Use RegisterBankInfo::constrainGenericRegister in GlobalISel's
+  /// InstructionSelect pass and constrainRegAttrs in every other pass,
+  /// including non-select passes of GlobalISel, instead.
+  const TargetRegisterClass *constrainRegClass(unsigned Reg,
+                                               const TargetRegisterClass *RC,
+                                               unsigned MinNumRegs = 0);
+
+  /// Constrain the register class or the register bank of the virtual register
+  /// \p Reg to be a common subclass and a common bank of both registers
+  /// provided respectively. Do nothing if any of the attributes (classes,
+  /// banks, or low-level types) of the registers are deemed incompatible, or if
+  /// the resulting register will have a class smaller than before and of size
+  /// less than \p MinNumRegs. Return true if such register attributes exist,
+  /// false otherwise.
+  ///
+  /// \note Assumes that each register has either a low-level type or a class
+  /// assigned, but not both. Use this method instead of constrainRegClass and
+  /// RegisterBankInfo::constrainGenericRegister everywhere but SelectionDAG
+  /// ISel / FastISel and GlobalISel's InstructionSelect pass respectively.
+  bool constrainRegAttrs(unsigned Reg, unsigned ConstrainingReg,
+                         unsigned MinNumRegs = 0);
+
+  /// recomputeRegClass - Try to find a legal super-class of Reg's register
+  /// class that still satisfies the constraints from the instructions using
+  /// Reg.  Returns true if Reg was upgraded.
+  ///
+  /// This method can be used after constraints have been removed from a
+  /// virtual register, for example after removing instructions or splitting
+  /// the live range.
+  bool recomputeRegClass(unsigned Reg);
+
+  /// createVirtualRegister - Create and return a new virtual register in the
+  /// function with the specified register class.
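+  ///
+  /// For example (the target register class here is an assumption):
+  /// \code
+  ///   unsigned NewVReg = MRI.createVirtualRegister(&X86::GR32RegClass);
+  /// \endcode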
+  unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
+
+  /// Accessor for VRegToType. This accessor should only be used
+  /// by global-isel related work.
+  VRegToTypeMap &getVRegToType() const {
+    if (!VRegToType)
+      VRegToType.reset(new VRegToTypeMap);
+    return *VRegToType.get();
+  }
+
+  /// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
+  /// (target independent) virtual register.
+  LLT getType(unsigned VReg) const;
+
+  /// Set the low-level type of \p VReg to \p Ty.
+  void setType(unsigned VReg, LLT Ty);
+
+  /// Create and return a new generic virtual register with low-level
+  /// type \p Ty.
+  unsigned createGenericVirtualRegister(LLT Ty);
+
+  /// Remove all types associated to virtual registers (after instruction
+  /// selection and constraining of all generic virtual registers).
+  void clearVirtRegTypes();
+
+  /// Creates a new virtual register that has no register class, register bank,
+  /// or low-level type assigned yet. This is only allowed to be used
+  /// temporarily while constructing machine instructions. Most operations are
+  /// undefined on an incomplete register until one of setRegClass(),
+  /// setRegBank() or setType() has been called on it.
+  unsigned createIncompleteVirtualRegister(StringRef Name = "");
+
+  /// getNumVirtRegs - Return the number of virtual registers created.
+  unsigned getNumVirtRegs() const { return VRegInfo.size(); }
+
+  /// clearVirtRegs - Remove all virtual registers (after physreg assignment).
+  void clearVirtRegs();
+
+  /// setRegAllocationHint - Specify a register allocation hint for the
+  /// specified virtual register. This is typically used by target, and in case
+  /// of an earlier hint it will be overwritten.
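+  ///
+  /// A sketch of recording a plain (target-independent) preference (VReg and
+  /// PhysReg are assumptions):
+  /// \code
+  ///   MRI.setRegAllocationHint(VReg, /*Type=*/0, PhysReg);
+  /// \endcode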
+  void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    RegAllocHints[VReg].first  = Type;
+    RegAllocHints[VReg].second.clear();
+    RegAllocHints[VReg].second.push_back(PrefReg);
+  }
+
+  /// addRegAllocationHint - Add a register allocation hint to the hints
+  /// vector for VReg.
+  void addRegAllocationHint(unsigned VReg, unsigned PrefReg) {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    RegAllocHints[VReg].second.push_back(PrefReg);
+  }
+
+  /// Specify the preferred (target independent) register allocation hint for
+  /// the specified virtual register.
+  void setSimpleHint(unsigned VReg, unsigned PrefReg) {
+    setRegAllocationHint(VReg, /*Type=*/0, PrefReg);
+  }
+
+  void clearSimpleHint(unsigned VReg) {
+    assert (RegAllocHints[VReg].first == 0 &&
+            "Expected to clear a non-target hint!");
+    RegAllocHints[VReg].second.clear();
+  }
+
+  /// getRegAllocationHint - Return the register allocation hint for the
+  /// specified virtual register. If there are many hints, this returns the
+  /// one with the greatest weight.
+  std::pair<unsigned, unsigned>
+  getRegAllocationHint(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    unsigned BestHint = (RegAllocHints[VReg].second.size() ?
+                         RegAllocHints[VReg].second[0] : 0);
+    return std::pair<unsigned, unsigned>(RegAllocHints[VReg].first, BestHint);
+  }
+
+  /// getSimpleHint - same as getRegAllocationHint except it will only return
+  /// a target independent hint.
+  unsigned getSimpleHint(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    std::pair<unsigned, unsigned> Hint = getRegAllocationHint(VReg);
+    return Hint.first ? 0 : Hint.second;
+  }
+
+  /// getRegAllocationHints - Return a reference to the vector of all
+  /// register allocation hints for VReg.
+  const std::pair<unsigned, SmallVector<unsigned, 4>>
+  &getRegAllocationHints(unsigned VReg) const {
+    assert(TargetRegisterInfo::isVirtualRegister(VReg));
+    return RegAllocHints[VReg];
+  }
+
+  /// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
+  /// specified register as undefined which causes the DBG_VALUE to be
+  /// deleted during LiveDebugVariables analysis.
+  void markUsesInDebugValueAsUndef(unsigned Reg) const;
+
+  /// Return true if the specified register is modified in this function.
+  /// This checks that no defining machine operands exist for the register or
+  /// any of its aliases. To ignore definitions found in functions marked
+  /// noreturn, pass 'true' for the optional parameter SkipNoReturnDef. The
+  /// register is also considered modified when it is set in the
+  /// UsedPhysRegMask.
+  bool isPhysRegModified(unsigned PhysReg, bool SkipNoReturnDef = false) const;
+
+  /// Return true if the specified register is modified or read in this
+  /// function. This checks that no machine operands exist for the register or
+  /// any of its aliases. The register is also considered used when it is set
+  /// in the UsedPhysRegMask.
+  bool isPhysRegUsed(unsigned PhysReg) const;
+
+  /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
+  /// This corresponds to the bit mask attached to register mask operands.
+  void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
+    UsedPhysRegMask.setBitsNotInMask(RegMask);
+  }
+
+  const BitVector &getUsedPhysRegsMask() const { return UsedPhysRegMask; }
+
+  //===--------------------------------------------------------------------===//
+  // Reserved Register Info
+  //===--------------------------------------------------------------------===//
+  //
+  // The set of reserved registers must be invariant during register
+  // allocation.  For example, the target cannot suddenly decide it needs a
+  // frame pointer when the register allocator has already used the frame
+  // pointer register for something else.
+  //
+  // These methods can be used by target hooks like hasFP() to avoid changing
+  // the reserved register set during register allocation.
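+  //
+  // A sketch of the expected sequence (PhysReg is an assumption; the freeze
+  // is typically performed by the register allocator):
+  //
+  //   MRI.freezeReservedRegs(MF);
+  //   assert(MRI.reservedRegsFrozen());
+  //   bool Reserved = MRI.isReserved(PhysReg);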
+
+  /// freezeReservedRegs - Called by the register allocator to freeze the set
+  /// of reserved registers before allocation begins.
+  void freezeReservedRegs(const MachineFunction&);
+
+  /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
+  /// to ensure the set of reserved registers stays constant.
+  bool reservedRegsFrozen() const {
+    return !ReservedRegs.empty();
+  }
+
+  /// canReserveReg - Returns true if PhysReg can be used as a reserved
+  /// register.  Any register can be reserved before freezeReservedRegs() is
+  /// called.
+  bool canReserveReg(unsigned PhysReg) const {
+    return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
+  }
+
+  /// getReservedRegs - Returns a reference to the frozen set of reserved
+  /// registers. This method should always be preferred to calling
+  /// TRI::getReservedRegs() when possible.
+  const BitVector &getReservedRegs() const {
+    assert(reservedRegsFrozen() &&
+           "Reserved registers haven't been frozen yet. "
+           "Use TRI::getReservedRegs().");
+    return ReservedRegs;
+  }
+
+  /// isReserved - Returns true when PhysReg is a reserved register.
+  ///
+  /// Reserved registers may belong to an allocatable register class, but the
+  /// target has explicitly requested that they are not used.
+  bool isReserved(unsigned PhysReg) const {
+    return getReservedRegs().test(PhysReg);
+  }
+
+  /// Returns true when the given register unit is considered reserved.
+  ///
+  /// Register units are considered reserved when for at least one of their
+  /// root registers, the root register and all super registers are reserved.
+  /// This currently iterates the register hierarchy and may be slower than
+  /// expected.
+  bool isReservedRegUnit(unsigned Unit) const;
+
+  /// isAllocatable - Returns true when PhysReg belongs to an allocatable
+  /// register class and it hasn't been reserved.
+  ///
+  /// Allocatable registers may show up in the allocation order of some virtual
+  /// register, so a register allocator needs to track its liveness and
+  /// availability.
+  bool isAllocatable(unsigned PhysReg) const {
+    return getTargetRegisterInfo()->isInAllocatableClass(PhysReg) &&
+      !isReserved(PhysReg);
+  }
+
+  //===--------------------------------------------------------------------===//
+  // LiveIn Management
+  //===--------------------------------------------------------------------===//
+
+  /// addLiveIn - Add the specified register as a live-in.  Note that it
+  /// is an error to add the same register to the same set more than once.
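+  ///
+  /// A sketch of the usual argument-lowering pattern (RC and PhysReg are
+  /// assumptions):
+  /// \code
+  ///   unsigned VReg = MRI.createVirtualRegister(RC);
+  ///   MRI.addLiveIn(PhysReg, VReg);
+  /// \endcode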
+  void addLiveIn(unsigned Reg, unsigned vreg = 0) {
+    LiveIns.push_back(std::make_pair(Reg, vreg));
+  }
+
+  // Iteration support for the live-ins set.  It's kept in sorted order
+  // by register number.
+  using livein_iterator =
+      std::vector<std::pair<unsigned,unsigned>>::const_iterator;
+  livein_iterator livein_begin() const { return LiveIns.begin(); }
+  livein_iterator livein_end()   const { return LiveIns.end(); }
+  bool            livein_empty() const { return LiveIns.empty(); }
+
+  ArrayRef<std::pair<unsigned, unsigned>> liveins() const {
+    return LiveIns;
+  }
+
+  bool isLiveIn(unsigned Reg) const;
+
+  /// getLiveInPhysReg - If VReg is a live-in virtual register, return the
+  /// corresponding live-in physical register.
+  unsigned getLiveInPhysReg(unsigned VReg) const;
+
+  /// getLiveInVirtReg - If PReg is a live-in physical register, return the
+  /// corresponding live-in virtual register.
+  unsigned getLiveInVirtReg(unsigned PReg) const;
+
+  /// EmitLiveInCopies - Emit copies to initialize livein virtual registers
+  /// into the given entry block.
+  void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
+                        const TargetRegisterInfo &TRI,
+                        const TargetInstrInfo &TII);
+
+  /// Returns a mask covering all bits that can appear in lane masks of
+  /// subregisters of the virtual register @p Reg.
+  LaneBitmask getMaxLaneMaskForVReg(unsigned Reg) const;
+
+  /// defusechain_iterator - This class provides iterator support for machine
+  /// operands in the function that use or define a specific register.  If
+  /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
+  /// returns defs.  If neither is true then you are silly and it always
+  /// returns end().  If SkipDebug is true it skips uses marked Debug
+  /// when incrementing.
+  template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_iterator
+    : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+    friend class MachineRegisterInfo;
+
+    MachineOperand *Op = nullptr;
+
+    explicit defusechain_iterator(MachineOperand *op) : Op(op) {
+      // If the first node isn't one we're interested in, advance to one that
+      // we are interested in.
+      if (op) {
+        if ((!ReturnUses && op->isUse()) ||
+            (!ReturnDefs && op->isDef()) ||
+            (SkipDebug && op->isDebug()))
+          advance();
+      }
+    }
+
+    void advance() {
+      assert(Op && "Cannot increment end iterator!");
+      Op = getNextOperandForReg(Op);
+
+      // All defs come before the uses, so stop def_iterator early.
+      if (!ReturnUses) {
+        if (Op) {
+          if (Op->isUse())
+            Op = nullptr;
+          else
+            assert(!Op->isDebug() && "Can't have debug defs");
+        }
+      } else {
+        // If this is an operand we don't care about, skip it.
+        while (Op && ((!ReturnDefs && Op->isDef()) ||
+                      (SkipDebug && Op->isDebug())))
+          Op = getNextOperandForReg(Op);
+      }
+    }
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    MachineInstr, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  MachineInstr, ptrdiff_t>::pointer;
+
+    defusechain_iterator() = default;
+
+    bool operator==(const defusechain_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const defusechain_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// atEnd - return true if this iterator is equal to reg_end() on the value.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only
+    defusechain_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      if (ByOperand)
+        advance();
+      else if (ByInstr) {
+        MachineInstr *P = Op->getParent();
+        do {
+          advance();
+        } while (Op && Op->getParent() == P);
+      } else if (ByBundle) {
+        MachineBasicBlock::instr_iterator P =
+            getBundleStart(Op->getParent()->getIterator());
+        do {
+          advance();
+        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
+      }
+
+      return *this;
+    }
+    defusechain_iterator operator++(int) {        // Postincrement
+      defusechain_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    /// getOperandNo - Return the operand # of this MachineOperand in its
+    /// MachineInstr.
+    unsigned getOperandNo() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op - &Op->getParent()->getOperand(0);
+    }
+
+    // Retrieve a reference to the current operand.
+    MachineOperand &operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return *Op;
+    }
+
+    MachineOperand *operator->() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op;
+    }
+  };
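+
+  // For illustration, these iterators are normally consumed through the
+  // range accessors declared earlier in this class, e.g. use_operands() and
+  // def_instructions() (sketch of a hypothetical caller):
+  //
+  //   for (const MachineOperand &UseMO : MRI.use_operands(Reg))
+  //     ...;                                // Visits every use operand of Reg.
+  //
+  //   for (const MachineInstr &DefMI : MRI.def_instructions(Reg))
+  //     ...;                                // Visits each instruction defining Reg.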
+
+  /// defusechain_instr_iterator - This class provides iterator support for
+  /// machine instructions in the function that use or define a specific
+  /// register.  If ReturnUses is true it returns uses of registers, if
+  /// ReturnDefs is true it returns defs.  If neither is true then you are
+  /// silly and it always returns end().  If SkipDebug is true it skips uses
+  /// marked Debug when incrementing.  Unlike defusechain_iterator, this
+  /// iterator dereferences to the MachineInstr containing the operand rather
+  /// than to the operand itself.
+  template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+           bool ByOperand, bool ByInstr, bool ByBundle>
+  class defusechain_instr_iterator
+    : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+    friend class MachineRegisterInfo;
+
+    MachineOperand *Op = nullptr;
+
+    explicit defusechain_instr_iterator(MachineOperand *op) : Op(op) {
+      // If the first node isn't one we're interested in, advance to one that
+      // we are interested in.
+      if (op) {
+        if ((!ReturnUses && op->isUse()) ||
+            (!ReturnDefs && op->isDef()) ||
+            (SkipDebug && op->isDebug()))
+          advance();
+      }
+    }
+
+    void advance() {
+      assert(Op && "Cannot increment end iterator!");
+      Op = getNextOperandForReg(Op);
+
+      // All defs come before the uses, so stop def_iterator early.
+      if (!ReturnUses) {
+        if (Op) {
+          if (Op->isUse())
+            Op = nullptr;
+          else
+            assert(!Op->isDebug() && "Can't have debug defs");
+        }
+      } else {
+        // If this is an operand we don't care about, skip it.
+        while (Op && ((!ReturnDefs && Op->isDef()) ||
+                      (SkipDebug && Op->isDebug())))
+          Op = getNextOperandForReg(Op);
+      }
+    }
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    MachineInstr, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  MachineInstr, ptrdiff_t>::pointer;
+
+    defusechain_instr_iterator() = default;
+
+    bool operator==(const defusechain_instr_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const defusechain_instr_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// atEnd - return true if this iterator is equal to reg_end() on the value.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only
+    defusechain_instr_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      if (ByOperand)
+        advance();
+      else if (ByInstr) {
+        MachineInstr *P = Op->getParent();
+        do {
+          advance();
+        } while (Op && Op->getParent() == P);
+      } else if (ByBundle) {
+        MachineBasicBlock::instr_iterator P =
+            getBundleStart(Op->getParent()->getIterator());
+        do {
+          advance();
+        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
+      }
+
+      return *this;
+    }
+    defusechain_instr_iterator operator++(int) {        // Postincrement
+      defusechain_instr_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    // Retrieve a reference to the current operand.
+    MachineInstr &operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      if (ByBundle)
+        return *getBundleStart(Op->getParent()->getIterator());
+      return *Op->getParent();
+    }
+
+    MachineInstr *operator->() const { return &operator*(); }
+  };
+};
+
+/// Iterate over the pressure sets affected by the given physical or virtual
+/// register. If Reg is physical, it must be a register unit (from
+/// MCRegUnitIterator).
+class PSetIterator {
+  const int *PSet = nullptr;
+  unsigned Weight = 0;
+
+public:
+  PSetIterator() = default;
+
+  PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
+    const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+    if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
+      const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
+      PSet = TRI->getRegClassPressureSets(RC);
+      Weight = TRI->getRegClassWeight(RC).RegWeight;
+    } else {
+      PSet = TRI->getRegUnitPressureSets(RegUnit);
+      Weight = TRI->getRegUnitWeight(RegUnit);
+    }
+    if (*PSet == -1)
+      PSet = nullptr;
+  }
+
+  bool isValid() const { return PSet; }
+
+  unsigned getWeight() const { return Weight; }
+
+  unsigned operator*() const { return *PSet; }
+
+  void operator++() {
+    assert(isValid() && "Invalid PSetIterator.");
+    ++PSet;
+    if (*PSet == -1)
+      PSet = nullptr;
+  }
+};
+
+inline PSetIterator MachineRegisterInfo::
+getPressureSets(unsigned RegUnit) const {
+  return PSetIterator(RegUnit, this);
+}
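+
+// For illustration, the intended traversal pattern (sketch of a hypothetical
+// caller updating a pressure array):
+//
+//   PSetIterator PSetI = MRI.getPressureSets(RegUnit);
+//   unsigned Weight = PSetI.getWeight();
+//   for (; PSetI.isValid(); ++PSetI)
+//     CurrSetPressure[*PSetI] += Weight;    // One entry per affected set.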
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEREGISTERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
new file mode 100644
index 0000000..b5ea208
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -0,0 +1,113 @@
+//===- MachineSSAUpdater.h - Unstructured SSA Update Tool -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachineSSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
+#define LLVM_CODEGEN_MACHINESSAUPDATER_H
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+template<typename T> class SmallVectorImpl;
+template<typename T> class SSAUpdaterTraits;
+
+/// MachineSSAUpdater - This class updates SSA form for a set of virtual
+/// registers defined in multiple blocks.  This is used when code duplication
+/// or another unstructured transformation wants to rewrite a set of uses of one
+/// vreg with uses of a set of vregs.
+class MachineSSAUpdater {
+  friend class SSAUpdaterTraits<MachineSSAUpdater>;
+
+private:
+  /// AvailableVals - This keeps track of which value to use on a per-block
+  /// basis.  When we insert PHI nodes, we keep track of them here.  This is
+  /// conceptually a DenseMap<MachineBasicBlock*, unsigned>, stored through an
+  /// opaque pointer to keep the map type out of this header.
+  void *AV = nullptr;
+
+  /// VR - Current virtual register whose uses are being updated.
+  unsigned VR;
+
+  /// VRC - Register class of the current virtual register.
+  const TargetRegisterClass *VRC;
+
+  /// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
+  /// nodes that it creates to the vector.
+  SmallVectorImpl<MachineInstr*> *InsertedPHIs;
+
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo *MRI;
+
+public:
+  /// MachineSSAUpdater constructor.  If InsertedPHIs is specified, it will be
+  /// filled in with all PHI Nodes created by rewriting.
+  explicit MachineSSAUpdater(MachineFunction &MF,
+                        SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
+  MachineSSAUpdater(const MachineSSAUpdater &) = delete;
+  MachineSSAUpdater &operator=(const MachineSSAUpdater &) = delete;
+  ~MachineSSAUpdater();
+
+  /// Initialize - Reset this object to get ready for a new set of SSA
+  /// updates.
+  void Initialize(unsigned V);
+
+  /// AddAvailableValue - Indicate that a rewritten value is available at the
+  /// end of the specified block with the specified value.
+  void AddAvailableValue(MachineBasicBlock *BB, unsigned V);
+
+  /// HasValueForBlock - Return true if the MachineSSAUpdater already has a
+  /// value for the specified block.
+  bool HasValueForBlock(MachineBasicBlock *BB) const;
+
+  /// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
+  /// live at the end of the specified block.
+  unsigned GetValueAtEndOfBlock(MachineBasicBlock *BB);
+
+  /// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
+  /// is live in the middle of the specified block.
+  ///
+  /// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
+  /// important case: if there is a definition of the rewritten value after the
+  /// 'use' in BB.  Consider code like this:
+  ///
+  ///      X1 = ...
+  ///   SomeBB:
+  ///      use(X)
+  ///      X2 = ...
+  ///      br Cond, SomeBB, OutBB
+  ///
+  /// In this case, there are two values (X1 and X2) added to the AvailableVals
+  /// set by the client of the rewriter, and those values are both live out of
+  /// their respective blocks.  However, the use of X happens in the *middle* of
+  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
+  /// merge the appropriate values, and this value isn't live out of the block.
+  unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB);
+
+  /// RewriteUse - Rewrite a use of the symbolic value.  This handles PHI nodes,
+  /// which use their value in the corresponding predecessor.  Note that this
+  /// will not work if the use is supposed to be rewritten to a value defined in
+  /// the same block as the use, but above it.  Any 'AddAvailableValue's added
+  /// for the use's block will be considered to be below it.
+  void RewriteUse(MachineOperand &U);
+
+private:
+  unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
+};
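+
+// For illustration, the typical update sequence (hypothetical client code;
+// OrigVReg, BB1/BB2, V1/V2 and UsesToRewrite are placeholders):
+//
+//   MachineSSAUpdater SSAUpdate(MF);
+//   SSAUpdate.Initialize(OrigVReg);         // One vreg per rewrite cycle.
+//   SSAUpdate.AddAvailableValue(BB1, V1);   // New defs reaching block ends.
+//   SSAUpdate.AddAvailableValue(BB2, V2);
+//   for (MachineOperand *UseMO : UsesToRewrite)
+//     SSAUpdate.RewriteUse(*UseMO);         // PHIs are inserted on demand.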
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESSAUPDATER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
new file mode 100644
index 0000000..e327881
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineScheduler.h
@@ -0,0 +1,1056 @@
+//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an interface for customizing the standard MachineScheduler
+// pass. Note that the entire pass may be replaced as follows:
+//
+// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
+//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
+//   ...}
+//
+// The MachineScheduler pass is only responsible for choosing the regions to be
+// scheduled. Targets can override the DAG builder and scheduler without
+// replacing the pass as follows:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   return new CustomMachineScheduler(C);
+// }
+//
+// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
+// scheduling while updating the instruction stream, register pressure, and live
+// intervals. Most targets don't need to override the DAG builder and list
+// scheduler, but subtargets that require custom scheduling heuristics may
+// plugin an alternate MachineSchedStrategy. The strategy is responsible for
+// selecting the highest priority node from the list:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   return new ScheduleDAGMILive(C, CustomStrategy(C));
+// }
+//
+// The DAG builder can also be customized in a sense by adding DAG mutations
+// that will run after DAG building and before list scheduling. DAG mutations
+// can adjust dependencies based on target-specific knowledge or add weak edges
+// to aid heuristics:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
+//   DAG->addMutation(new CustomDAGMutation(...));
+//   return DAG;
+// }
+//
+// A target that supports alternative schedulers can use the
+// MachineSchedRegistry to allow command line selection. This can be done by
+// implementing the following boilerplate:
+//
+// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
+//  return new CustomMachineScheduler(C);
+// }
+// static MachineSchedRegistry
+// SchedCustomRegistry("custom", "Run my target's custom scheduler",
+//                     createCustomMachineSched);
+//
+// Finally, subtargets that don't need to implement custom heuristics but would
+// like to configure the GenericScheduler's policy for a given scheduler region,
+// including scheduling direction and register pressure tracking policy, can do
+// this:
+//
+// void <SubTarget>Subtarget::
+// overrideSchedPolicy(MachineSchedPolicy &Policy,
+//                     unsigned NumRegionInstrs) const {
+//   Policy.<Flag> = true;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
+#define LLVM_CODEGEN_MACHINESCHEDULER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+extern cl::opt<bool> ForceTopDown;
+extern cl::opt<bool> ForceBottomUp;
+
+class LiveIntervals;
+class MachineDominatorTree;
+class MachineFunction;
+class MachineInstr;
+class MachineLoopInfo;
+class RegisterClassInfo;
+class SchedDFSResult;
+class ScheduleHazardRecognizer;
+class TargetInstrInfo;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// MachineSchedContext provides enough context from the MachineScheduler pass
+/// for the target to instantiate a scheduler.
+struct MachineSchedContext {
+  MachineFunction *MF = nullptr;
+  const MachineLoopInfo *MLI = nullptr;
+  const MachineDominatorTree *MDT = nullptr;
+  const TargetPassConfig *PassConfig = nullptr;
+  AliasAnalysis *AA = nullptr;
+  LiveIntervals *LIS = nullptr;
+
+  RegisterClassInfo *RegClassInfo;
+
+  MachineSchedContext();
+  virtual ~MachineSchedContext();
+};
+
+/// MachineSchedRegistry provides a selection of available machine instruction
+/// schedulers.
+class MachineSchedRegistry : public MachinePassRegistryNode {
+public:
+  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);
+
+  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
+  using FunctionPassCtor = ScheduleDAGCtor;
+
+  static MachinePassRegistry Registry;
+
+  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
+    : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+    Registry.Add(this);
+  }
+
+  ~MachineSchedRegistry() { Registry.Remove(this); }
+
+  // Accessors.
+  //
+  MachineSchedRegistry *getNext() const {
+    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
+  }
+
+  static MachineSchedRegistry *getList() {
+    return (MachineSchedRegistry *)Registry.getList();
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
+
+class ScheduleDAGMI;
+
+/// Define a generic scheduling policy for targets that don't provide their own
+/// MachineSchedStrategy. This can be overridden for each scheduling region
+/// before building the DAG.
+struct MachineSchedPolicy {
+  // Allow the scheduler to disable register pressure tracking.
+  bool ShouldTrackPressure = false;
+  /// Track LaneMasks to allow reordering of independent subregister writes
+  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
+  bool ShouldTrackLaneMasks = false;
+
+  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
+  // is true, the scheduler runs in both directions and converges.
+  bool OnlyTopDown = false;
+  bool OnlyBottomUp = false;
+
+  // Disable heuristic that tries to fetch nodes from long dependency chains
+  // first.
+  bool DisableLatencyHeuristic = false;
+
+  MachineSchedPolicy() = default;
+};
+
+/// MachineSchedStrategy - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Initialization sequence:
+///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
+class MachineSchedStrategy {
+  virtual void anchor();
+
+public:
+  virtual ~MachineSchedStrategy() = default;
+
+  /// Optionally override the per-region scheduling policy.
+  virtual void initPolicy(MachineBasicBlock::iterator Begin,
+                          MachineBasicBlock::iterator End,
+                          unsigned NumRegionInstrs) {}
+
+  virtual void dumpPolicy() const {}
+
+  /// Check if pressure tracking is needed before building the DAG and
+  /// initializing this strategy. Called after initPolicy.
+  virtual bool shouldTrackPressure() const { return true; }
+
+  /// Returns true if lane masks should be tracked. LaneMask tracking is
+  /// necessary to reorder independent subregister defs for the same vreg.
+  /// This has to be enabled in combination with shouldTrackPressure().
+  virtual bool shouldTrackLaneMasks() const { return false; }
+
+  // If this method returns true, handling of the scheduling regions
+  // themselves (in case of a scheduling boundary in MBB) will be done
+  // beginning with the topmost region of MBB.
+  virtual bool doMBBSchedRegionsTopDown() const { return false; }
+
+  /// Initialize the strategy after building the DAG for a new region.
+  virtual void initialize(ScheduleDAGMI *DAG) = 0;
+
+  /// Tell the strategy that MBB is about to be processed.
+  virtual void enterMBB(MachineBasicBlock *MBB) {}
+
+  /// Tell the strategy that the current MBB is done.
+  virtual void leaveMBB() {}
+
+  /// Notify this strategy that all roots have been released (including those
+  /// that depend on EntrySU or ExitSU).
+  virtual void registerRoots() {}
+
+  /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
+  /// schedule the node at the top of the unscheduled region. Otherwise it will
+  /// be scheduled at the bottom.
+  virtual SUnit *pickNode(bool &IsTopNode) = 0;
+
+  /// \brief Scheduler callback to notify that a new subtree is scheduled.
+  virtual void scheduleTree(unsigned SubtreeID) {}
+
+  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
+  /// instruction and updated scheduled/remaining flags in the DAG nodes.
+  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
+
+  /// When all predecessor dependencies have been resolved, free this node for
+  /// top-down scheduling.
+  virtual void releaseTopNode(SUnit *SU) = 0;
+
+  /// When all successor dependencies have been resolved, free this node for
+  /// bottom-up scheduling.
+  virtual void releaseBottomNode(SUnit *SU) = 0;
+};
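+
+// For illustration, a minimal (and deliberately naive) strategy only needs
+// the pure virtual hooks above. This sketch schedules strictly top-down in
+// whatever order nodes become ready:
+//
+//   struct TrivialSchedStrategy : public MachineSchedStrategy {
+//     std::vector<SUnit *> Ready;
+//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
+//     SUnit *pickNode(bool &IsTopNode) override {
+//       IsTopNode = true;                   // Always take from the top.
+//       if (Ready.empty())
+//         return nullptr;
+//       SUnit *SU = Ready.back();
+//       Ready.pop_back();
+//       return SU;
+//     }
+//     void schedNode(SUnit *SU, bool IsTopNode) override {}
+//     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
+//     void releaseBottomNode(SUnit *SU) override {}
+//   };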
+
+/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
+/// schedules machine instructions according to the given MachineSchedStrategy
+/// without much extra book-keeping. This is the common functionality between
+/// PreRA and PostRA MachineScheduler.
+class ScheduleDAGMI : public ScheduleDAGInstrs {
+protected:
+  AliasAnalysis *AA;
+  LiveIntervals *LIS;
+  std::unique_ptr<MachineSchedStrategy> SchedImpl;
+
+  /// Topo - A topological ordering for SUnits which permits fast IsReachable
+  /// and similar queries.
+  ScheduleDAGTopologicalSort Topo;
+
+  /// Ordered list of DAG postprocessing steps.
+  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
+
+  /// The top of the unscheduled zone.
+  MachineBasicBlock::iterator CurrentTop;
+
+  /// The bottom of the unscheduled zone.
+  MachineBasicBlock::iterator CurrentBottom;
+
+  /// Record the next node in a scheduled cluster.
+  const SUnit *NextClusterPred = nullptr;
+  const SUnit *NextClusterSucc = nullptr;
+
+#ifndef NDEBUG
+  /// The number of instructions scheduled so far. Used to cut off the
+  /// scheduler at the point determined by misched-cutoff.
+  unsigned NumInstrsScheduled = 0;
+#endif
+
+public:
+  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
+                bool RemoveKillFlags)
+      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
+        LIS(C->LIS), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU) {}
+
+  // Provide a vtable anchor
+  ~ScheduleDAGMI() override;
+
+  /// If this method returns true, handling of the scheduling regions
+  /// themselves (in case of a scheduling boundary in MBB) will be done
+  /// beginning with the topmost region of MBB.
+  bool doMBBSchedRegionsTopDown() const override {
+    return SchedImpl->doMBBSchedRegionsTopDown();
+  }
+
+  // Returns LiveIntervals instance for use in DAG mutators and such.
+  LiveIntervals *getLIS() const { return LIS; }
+
+  /// Return true if this DAG supports VReg liveness and RegPressure.
+  virtual bool hasVRegLiveness() const { return false; }
+
+  /// Add a postprocessing step to the DAG builder.
+  /// Mutations are applied in the order that they are added after normal DAG
+  /// building and before MachineSchedStrategy initialization.
+  ///
+  /// ScheduleDAGMI takes ownership of the Mutation object.
+  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+    if (Mutation)
+      Mutations.push_back(std::move(Mutation));
+  }
+
+  /// \brief True if an edge can be added from PredSU to SuccSU without creating
+  /// a cycle.
+  bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
+
+  /// \brief Add a DAG edge to the given SU with the given predecessor
+  /// dependence data.
+  ///
+  /// \returns true if the edge may be added without creating a cycle OR if an
+  /// equivalent edge already existed (false indicates failure).
+  bool addEdge(SUnit *SuccSU, const SDep &PredDep);
+
+  MachineBasicBlock::iterator top() const { return CurrentTop; }
+  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
+
+  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+  /// region. This covers all instructions in a block, while schedule() may only
+  /// cover a subset.
+  void enterRegion(MachineBasicBlock *bb,
+                   MachineBasicBlock::iterator begin,
+                   MachineBasicBlock::iterator end,
+                   unsigned regioninstrs) override;
+
+  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+  /// reorderable instructions.
+  void schedule() override;
+
+  void startBlock(MachineBasicBlock *bb) override;
+  void finishBlock() override;
+
+  /// Change the position of an instruction within the basic block and update
+  /// live ranges and region boundary iterators.
+  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+
+  const SUnit *getNextClusterPred() const { return NextClusterPred; }
+
+  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
+
+  void viewGraph(const Twine &Name, const Twine &Title) override;
+  void viewGraph() override;
+
+protected:
+  // Top-Level entry points for the schedule() driver...
+
+  /// Apply each ScheduleDAGMutation step in order. This allows different
+  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
+  void postprocessDAG();
+
+  /// Release ExitSU predecessors and setup scheduler queues.
+  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
+
+  /// Update scheduler DAG and queues after scheduling an instruction.
+  void updateQueues(SUnit *SU, bool IsTopNode);
+
+  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
+  void placeDebugValues();
+
+  /// \brief dump the scheduled Sequence.
+  void dumpSchedule() const;
+
+  // Lesser helpers...
+  bool checkSchedLimit();
+
+  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
+                             SmallVectorImpl<SUnit*> &BotRoots);
+
+  void releaseSucc(SUnit *SU, SDep *SuccEdge);
+  void releaseSuccessors(SUnit *SU);
+  void releasePred(SUnit *SU, SDep *PredEdge);
+  void releasePredecessors(SUnit *SU);
+};
+
+/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
+/// machine instructions while updating LiveIntervals and tracking regpressure.
+class ScheduleDAGMILive : public ScheduleDAGMI {
+protected:
+  RegisterClassInfo *RegClassInfo;
+
+  /// Information about DAG subtrees. If DFSResult is NULL, then ScheduledTrees
+  /// will be empty.
+  SchedDFSResult *DFSResult = nullptr;
+  BitVector ScheduledTrees;
+
+  MachineBasicBlock::iterator LiveRegionEnd;
+
+  /// Maps vregs to the SUnits of their uses in the current scheduling region.
+  VReg2SUnitMultiMap VRegUses;
+
+  // Map each SU to its summary of pressure changes. This array is updated for
+  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
+  // has no effect on the pressure diffs.
+  PressureDiffs SUPressureDiffs;
+
+  bool ShouldTrackPressure = false;
+  bool ShouldTrackLaneMasks = false;
+
+  /// Register pressure in this region computed by initRegPressure.
+  IntervalPressure RegPressure;
+  RegPressureTracker RPTracker;
+
+  /// List of pressure sets that exceed the target's pressure limit before
+  /// scheduling, listed in increasing set ID order. Each pressure set is paired
+  /// with its max pressure in the currently scheduled regions.
+  std::vector<PressureChange> RegionCriticalPSets;
+
+  /// The top of the unscheduled zone.
+  IntervalPressure TopPressure;
+  RegPressureTracker TopRPTracker;
+
+  /// The bottom of the unscheduled zone.
+  IntervalPressure BotPressure;
+  RegPressureTracker BotRPTracker;
+
+  /// True if disconnected subregister components are already renamed.
+  /// The renaming is only done on demand if lane masks are tracked.
+  bool DisconnectedComponentsRenamed = false;
+
+public:
+  ScheduleDAGMILive(MachineSchedContext *C,
+                    std::unique_ptr<MachineSchedStrategy> S)
+      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
+        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
+        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
+
+  ~ScheduleDAGMILive() override;
+
+  /// Return true if this DAG supports VReg liveness and RegPressure.
+  bool hasVRegLiveness() const override { return true; }
+
+  /// \brief Return true if register pressure tracking is enabled.
+  bool isTrackingPressure() const { return ShouldTrackPressure; }
+
+  /// Get current register pressure for the top scheduled instructions.
+  const IntervalPressure &getTopPressure() const { return TopPressure; }
+  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
+
+  /// Get current register pressure for the bottom scheduled instructions.
+  const IntervalPressure &getBotPressure() const { return BotPressure; }
+  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
+
+  /// Get register pressure for the entire scheduling region before scheduling.
+  const IntervalPressure &getRegPressure() const { return RegPressure; }
+
+  const std::vector<PressureChange> &getRegionCriticalPSets() const {
+    return RegionCriticalPSets;
+  }
+
+  PressureDiff &getPressureDiff(const SUnit *SU) {
+    return SUPressureDiffs[SU->NodeNum];
+  }
+
+  /// Compute a DFSResult after DAG building is complete, and before any
+  /// queue comparisons.
+  void computeDFSResult();
+
+  /// Return a non-null DFS result if the scheduling strategy initialized it.
+  const SchedDFSResult *getDFSResult() const { return DFSResult; }
+
+  BitVector &getScheduledTrees() { return ScheduledTrees; }
+
+  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+  /// region. This covers all instructions in a block, while schedule() may only
+  /// cover a subset.
+  void enterRegion(MachineBasicBlock *bb,
+                   MachineBasicBlock::iterator begin,
+                   MachineBasicBlock::iterator end,
+                   unsigned regioninstrs) override;
+
+  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+  /// reorderable instructions.
+  void schedule() override;
+
+  /// Compute the cyclic critical path through the DAG.
+  unsigned computeCyclicCriticalPath();
+
+protected:
+  // Top-Level entry points for the schedule() driver...
+
+  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
+  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
+  /// region, TopRPTracker and BotRPTracker will be initialized to the top and
+  /// bottom of the DAG region without covering any unscheduled instruction.
+  void buildDAGWithRegPressure();
+
+  /// Release ExitSU predecessors and setup scheduler queues. Re-position
+  /// the Top RP tracker in case the region beginning has changed.
+  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
+
+  /// Move an instruction and update register pressure.
+  void scheduleMI(SUnit *SU, bool IsTopNode);
+
+  // Lesser helpers...
+
+  void initRegPressure();
+
+  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);
+
+  void updateScheduledPressure(const SUnit *SU,
+                               const std::vector<unsigned> &NewMaxPressure);
+
+  void collectVRegUses(SUnit &SU);
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// Helpers for implementing custom MachineSchedStrategy classes. These take
+/// care of the book-keeping associated with list scheduling heuristics.
+///
+//===----------------------------------------------------------------------===//
+
+/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+///
+/// This is a convenience class that may be used by implementations of
+/// MachineSchedStrategy.
+class ReadyQueue {
+  unsigned ID;
+  std::string Name;
+  std::vector<SUnit*> Queue;
+
+public:
+  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+  unsigned getID() const { return ID; }
+
+  StringRef getName() const { return Name; }
+
+  // SU is in this queue if its NodeQueueId is a superset of this ID.
+  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+  bool empty() const { return Queue.empty(); }
+
+  void clear() { Queue.clear(); }
+
+  unsigned size() const { return Queue.size(); }
+
+  using iterator = std::vector<SUnit*>::iterator;
+
+  iterator begin() { return Queue.begin(); }
+
+  iterator end() { return Queue.end(); }
+
+  ArrayRef<SUnit*> elements() { return Queue; }
+
+  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }
+
+  void push(SUnit *SU) {
+    Queue.push_back(SU);
+    SU->NodeQueueId |= ID;
+  }
+
+  iterator remove(iterator I) {
+    (*I)->NodeQueueId &= ~ID;
+    *I = Queue.back();
+    unsigned idx = I - Queue.begin();
+    Queue.pop_back();
+    return Queue.begin() + idx;
+  }
+
+  void dump() const;
+};
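+
+// For illustration, remove() swaps the back element into the erased slot, so
+// callers erasing while scanning must continue from the returned iterator
+// rather than incrementing (hypothetical caller):
+//
+//   for (ReadyQueue::iterator I = Q.begin(); I != Q.end();) {
+//     if (shouldDrop(*I))                   // shouldDrop is a placeholder.
+//       I = Q.remove(I);                    // Back element now lives at *I.
+//     else
+//       ++I;
+//   }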
+
+/// Summarize the unscheduled region.
+struct SchedRemainder {
+  // Critical path through the DAG in expected latency.
+  unsigned CriticalPath;
+  unsigned CyclicCritPath;
+
+  // Scaled count of micro-ops left to schedule.
+  unsigned RemIssueCount;
+
+  bool IsAcyclicLatencyLimited;
+
+  // Unscheduled resources
+  SmallVector<unsigned, 16> RemainingCounts;
+
+  SchedRemainder() { reset(); }
+
+  void reset() {
+    CriticalPath = 0;
+    CyclicCritPath = 0;
+    RemIssueCount = 0;
+    IsAcyclicLatencyLimited = false;
+    RemainingCounts.clear();
+  }
+
+  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
+};
+
+/// Each Scheduling boundary is associated with ready queues. It tracks the
+/// current cycle in the direction of movement, and maintains the state
+/// of "hazards" and other interlocks at the current cycle.
+class SchedBoundary {
+public:
+  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+  enum {
+    TopQID = 1,
+    BotQID = 2,
+    LogMaxQID = 2
+  };
+
+  ScheduleDAGMI *DAG = nullptr;
+  const TargetSchedModel *SchedModel = nullptr;
+  SchedRemainder *Rem = nullptr;
+
+  ReadyQueue Available;
+  ReadyQueue Pending;
+
+  ScheduleHazardRecognizer *HazardRec = nullptr;
+
+private:
+  /// True if the pending Q should be checked/updated before scheduling another
+  /// instruction.
+  bool CheckPending;
+
+  /// Number of cycles it takes to issue the instructions scheduled in this
+  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
+  /// See getStalls().
+  unsigned CurrCycle;
+
+  /// Micro-ops issued in the current cycle
+  unsigned CurrMOps;
+
+  /// MinReadyCycle - Cycle of the soonest available instruction.
+  unsigned MinReadyCycle;
+
+  // The expected latency of the critical path in this scheduled zone.
+  unsigned ExpectedLatency;
+
+  // The latency of dependence chains leading into this zone.
+  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
+  // For each cycle scheduled: DLat -= 1.
+  unsigned DependentLatency;
+
+  /// Count the scheduled (issued) micro-ops that can be retired by
+  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
+  unsigned RetiredMOps;
+
+  // Count scheduled resources that have been executed. Resources are
+  // considered executed if they become ready in the time that it takes to
+  // saturate any resource including the one in question. Counts are scaled
+  // for direct comparison with other resources. Counts can be compared with
+  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
+  SmallVector<unsigned, 16> ExecutedResCounts;
+
+  /// Cache the max count for a single resource.
+  unsigned MaxExecutedResCount;
+
+  // Cache the critical resources ID in this scheduled zone.
+  unsigned ZoneCritResIdx;
+
+  // Is the scheduled region resource limited vs. latency limited.
+  bool IsResourceLimited;
+
+  // Record the highest cycle at which each resource has been reserved by a
+  // scheduled instruction.
+  SmallVector<unsigned, 16> ReservedCycles;
+
+#ifndef NDEBUG
+  // Remember the greatest possible stall as an upper bound on the number of
+  // times we should retry the pending queue because of a hazard.
+  unsigned MaxObservedStall;
+#endif
+
+public:
+  /// Pending queues extend the ready queues with the same ID and the
+  /// PendingFlag set.
+  SchedBoundary(unsigned ID, const Twine &Name):
+    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
+    reset();
+  }
+
+  ~SchedBoundary();
+
+  void reset();
+
+  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
+            SchedRemainder *rem);
+
+  bool isTop() const {
+    return Available.getID() == TopQID;
+  }
+
+  /// Number of cycles to issue the instructions scheduled in this zone.
+  unsigned getCurrCycle() const { return CurrCycle; }
+
+  /// Micro-ops issued in the current cycle
+  unsigned getCurrMOps() const { return CurrMOps; }
+
+  // The latency of dependence chains leading into this zone.
+  unsigned getDependentLatency() const { return DependentLatency; }
+
+  /// Get the number of latency cycles "covered" by the scheduled
+  /// instructions. This is the larger of the critical path within the zone
+  /// and the number of cycles required to issue the instructions.
+  unsigned getScheduledLatency() const {
+    return std::max(ExpectedLatency, CurrCycle);
+  }
+
+  unsigned getUnscheduledLatency(SUnit *SU) const {
+    return isTop() ? SU->getHeight() : SU->getDepth();
+  }
+
+  unsigned getResourceCount(unsigned ResIdx) const {
+    return ExecutedResCounts[ResIdx];
+  }
+
+  /// Get the scaled count of scheduled micro-ops and resources, including
+  /// executed resources.
+  unsigned getCriticalCount() const {
+    if (!ZoneCritResIdx)
+      return RetiredMOps * SchedModel->getMicroOpFactor();
+    return getResourceCount(ZoneCritResIdx);
+  }
+
+  /// Get a scaled count for the minimum execution time of the scheduled
+  /// micro-ops that are ready to execute by getExecutedCount. Notice the
+  /// feedback loop.
+  unsigned getExecutedCount() const {
+    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
+                    MaxExecutedResCount);
+  }
+
+  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
+
+  // Is the scheduled region resource limited vs. latency limited.
+  bool isResourceLimited() const { return IsResourceLimited; }
+
+  /// Get the difference between the given SUnit's ready time and the current
+  /// cycle.
+  unsigned getLatencyStallCycles(SUnit *SU);
+
+  unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
+
+  bool checkHazard(SUnit *SU);
+
+  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
+
+  unsigned getOtherResourceCount(unsigned &OtherCritIdx);
+
+  void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+  void bumpCycle(unsigned NextCycle);
+
+  void incExecutedResources(unsigned PIdx, unsigned Count);
+
+  unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
+
+  void bumpNode(SUnit *SU);
+
+  void releasePending();
+
+  void removeReady(SUnit *SU);
+
+  /// Call this before applying any other heuristics to the Available queue.
+  /// Updates the Available/Pending Q's if necessary and returns the single
+  /// available instruction, or NULL if there are multiple candidates.
+  SUnit *pickOnlyChoice();
+
+  void dumpScheduledState() const;
+};
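+
+// For illustration, strategies commonly consult pickOnlyChoice() before
+// running their full heuristics, since a single available candidate needs no
+// comparison (sketch from a hypothetical pickNode implementation with a
+// SchedBoundary member named Top):
+//
+//   if (SUnit *SU = Top.pickOnlyChoice()) {
+//     IsTopNode = true;
+//     return SU;
+//   }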
+
+/// Base class for GenericScheduler. This class maintains information about
+/// scheduling candidates based on TargetSchedModel making it easy to implement
+/// heuristics for either preRA or postRA scheduling.
+class GenericSchedulerBase : public MachineSchedStrategy {
+public:
+  /// Represent the type of SchedCandidate found within a single queue.
+  /// pickNodeBidirectional depends on these listed by decreasing priority.
+  enum CandReason : uint8_t {
+    NoCand, Only1, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak,
+    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
+    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
+
+#ifndef NDEBUG
+  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
+#endif
+
+  /// Policy for scheduling the next instruction in the candidate's zone.
+  struct CandPolicy {
+    bool ReduceLatency = false;
+    unsigned ReduceResIdx = 0;
+    unsigned DemandResIdx = 0;
+
+    CandPolicy() = default;
+
+    bool operator==(const CandPolicy &RHS) const {
+      return ReduceLatency == RHS.ReduceLatency &&
+             ReduceResIdx == RHS.ReduceResIdx &&
+             DemandResIdx == RHS.DemandResIdx;
+    }
+    bool operator!=(const CandPolicy &RHS) const {
+      return !(*this == RHS);
+    }
+  };
+
+  /// Status of an instruction's critical resource consumption.
+  struct SchedResourceDelta {
+    // Count critical resources in the scheduled region required by SU.
+    unsigned CritResources = 0;
+
+    // Count critical resources from another region consumed by SU.
+    unsigned DemandedResources = 0;
+
+    SchedResourceDelta() = default;
+
+    bool operator==(const SchedResourceDelta &RHS) const {
+      return CritResources == RHS.CritResources
+        && DemandedResources == RHS.DemandedResources;
+    }
+    bool operator!=(const SchedResourceDelta &RHS) const {
+      return !operator==(RHS);
+    }
+  };
+
+  /// Store the state used by GenericScheduler heuristics, required for the
+  /// lifetime of one invocation of pickNode().
+  struct SchedCandidate {
+    CandPolicy Policy;
+
+    // The best SUnit candidate.
+    SUnit *SU;
+
+    // The reason for this candidate.
+    CandReason Reason;
+
+    // Whether this candidate should be scheduled at top/bottom.
+    bool AtTop;
+
+    // Register pressure values for the best candidate.
+    RegPressureDelta RPDelta;
+
+    // Critical resource consumption of the best candidate.
+    SchedResourceDelta ResDelta;
+
+    SchedCandidate() { reset(CandPolicy()); }
+    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }
+
+    void reset(const CandPolicy &NewPolicy) {
+      Policy = NewPolicy;
+      SU = nullptr;
+      Reason = NoCand;
+      AtTop = false;
+      RPDelta = RegPressureDelta();
+      ResDelta = SchedResourceDelta();
+    }
+
+    bool isValid() const { return SU; }
+
+    // Copy the status of another candidate without changing policy.
+    void setBest(SchedCandidate &Best) {
+      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
+      SU = Best.SU;
+      Reason = Best.Reason;
+      AtTop = Best.AtTop;
+      RPDelta = Best.RPDelta;
+      ResDelta = Best.ResDelta;
+    }
+
+    void initResourceDelta(const ScheduleDAGMI *DAG,
+                           const TargetSchedModel *SchedModel);
+  };
+
+protected:
+  const MachineSchedContext *Context;
+  const TargetSchedModel *SchedModel = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+
+  SchedRemainder Rem;
+
+  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}
+
+  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
+                 SchedBoundary *OtherZone);
+
+#ifndef NDEBUG
+  void traceCandidate(const SchedCandidate &Cand);
+#endif
+};
+
+/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class GenericScheduler : public GenericSchedulerBase {
+public:
+  GenericScheduler(const MachineSchedContext *C):
+    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
+    Bot(SchedBoundary::BotQID, "BotQ") {}
+
+  void initPolicy(MachineBasicBlock::iterator Begin,
+                  MachineBasicBlock::iterator End,
+                  unsigned NumRegionInstrs) override;
+
+  void dumpPolicy() const override;
+
+  bool shouldTrackPressure() const override {
+    return RegionPolicy.ShouldTrackPressure;
+  }
+
+  bool shouldTrackLaneMasks() const override {
+    return RegionPolicy.ShouldTrackLaneMasks;
+  }
+
+  void initialize(ScheduleDAGMI *dag) override;
+
+  SUnit *pickNode(bool &IsTopNode) override;
+
+  void schedNode(SUnit *SU, bool IsTopNode) override;
+
+  void releaseTopNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+
+    Top.releaseNode(SU, SU->TopReadyCycle);
+    TopCand.SU = nullptr;
+  }
+
+  void releaseBottomNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+
+    Bot.releaseNode(SU, SU->BotReadyCycle);
+    BotCand.SU = nullptr;
+  }
+
+  void registerRoots() override;
+
+protected:
+  ScheduleDAGMILive *DAG = nullptr;
+
+  MachineSchedPolicy RegionPolicy;
+
+  // State of the top and bottom scheduled instruction boundaries.
+  SchedBoundary Top;
+  SchedBoundary Bot;
+
+  /// Candidate last picked from Top boundary.
+  SchedCandidate TopCand;
+  /// Candidate last picked from Bot boundary.
+  SchedCandidate BotCand;
+
+  void checkAcyclicLatency();
+
+  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
+                     const RegPressureTracker &RPTracker,
+                     RegPressureTracker &TempTracker);
+
+  void tryCandidate(SchedCandidate &Cand,
+                    SchedCandidate &TryCand,
+                    SchedBoundary *Zone);
+
+  SUnit *pickNodeBidirectional(bool &IsTopNode);
+
+  void pickNodeFromQueue(SchedBoundary &Zone,
+                         const CandPolicy &ZonePolicy,
+                         const RegPressureTracker &RPTracker,
+                         SchedCandidate &Candidate);
+
+  void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+};
+
+/// PostGenericScheduler - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Callbacks from ScheduleDAGMI:
+///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
+class PostGenericScheduler : public GenericSchedulerBase {
+  ScheduleDAGMI *DAG;
+  SchedBoundary Top;
+  SmallVector<SUnit*, 8> BotRoots;
+
+public:
+  PostGenericScheduler(const MachineSchedContext *C):
+    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
+
+  ~PostGenericScheduler() override = default;
+
+  void initPolicy(MachineBasicBlock::iterator Begin,
+                  MachineBasicBlock::iterator End,
+                  unsigned NumRegionInstrs) override {
+    /* no configurable policy */
+  }
+
+  /// PostRA scheduling does not track pressure.
+  bool shouldTrackPressure() const override { return false; }
+
+  void initialize(ScheduleDAGMI *Dag) override;
+
+  void registerRoots() override;
+
+  SUnit *pickNode(bool &IsTopNode) override;
+
+  void scheduleTree(unsigned SubtreeID) override {
+    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
+  }
+
+  void schedNode(SUnit *SU, bool IsTopNode) override;
+
+  void releaseTopNode(SUnit *SU) override {
+    if (SU->isScheduled)
+      return;
+    Top.releaseNode(SU, SU->TopReadyCycle);
+  }
+
+  // Only called for roots.
+  void releaseBottomNode(SUnit *SU) override {
+    BotRoots.push_back(SU);
+  }
+
+protected:
+  void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
+
+  void pickNodeFromQueue(SchedCandidate &Cand);
+};
+
+/// Create the standard converging machine scheduler. This will be used as the
+/// default scheduler if the target does not set a default.
+/// Adds default DAG mutations.
+ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);
+
+/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
+ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);
+
+std::unique_ptr<ScheduleDAGMutation>
+createLoadClusterDAGMutation(const TargetInstrInfo *TII,
+                             const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createStoreClusterDAGMutation(const TargetInstrInfo *TII,
+                              const TargetRegisterInfo *TRI);
+
+std::unique_ptr<ScheduleDAGMutation>
+createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
+                               const TargetRegisterInfo *TRI);
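+
+// For illustration, targets typically attach these mutations when creating
+// their scheduler instance (sketch of a hypothetical createMachineScheduler
+// override):
+//
+//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
+//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
+//   return DAG;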
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h b/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h
new file mode 100644
index 0000000..9d8db39
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -0,0 +1,436 @@
+//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the MachineTraceMetrics analysis pass
+// that estimates CPU resource usage and critical data dependency paths through
+// preferred traces. This is useful for super-scalar CPUs where execution speed
+// can be limited both by data dependencies and by limited execution resources.
+//
+// Out-of-order CPUs will often be executing instructions from multiple basic
+// blocks at the same time. This makes it difficult to estimate the resource
+// usage accurately in a single basic block. Resources can be estimated better
+// by looking at a trace through the current basic block.
+//
+// For every block, the MachineTraceMetrics pass will pick a preferred trace
+// that passes through the block. The trace is chosen based on loop structure,
+// branch probabilities, and resource usage. The intention is to pick likely
+// traces that would be the most affected by code transformations.
+//
+// It is expensive to compute a full arbitrary trace for every block, so to
+// save some computations, traces are chosen to be convergent. This means that
+// if the traces through basic blocks A and B ever cross when moving away from
+// A and B, they never diverge again. This applies in both directions: if the
+// traces meet above A and B, they won't diverge when going further back.
+//
+// Traces tend to align with loops. The trace through a block in an inner loop
+// will begin at the loop entry block and end at a back edge. If there are
+// nested loops, the trace may begin and end at those instead.
+//
+// For each trace, we compute the critical path length, which is the number of
+// cycles required to execute the trace when execution is limited by data
+// dependencies only. We also compute the resource height, which is the number
+// of cycles required to execute all instructions in the trace when ignoring
+// data dependencies.
+//
+// Every instruction in the current block has a slack - the number of cycles
+// execution of the instruction can be delayed without extending the critical
+// path.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINETRACEMETRICS_H
+#define LLVM_CODEGEN_MACHINETRACEMETRICS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+
+namespace llvm {
+
+class AnalysisUsage;
+class MachineFunction;
+class MachineInstr;
+class MachineLoop;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+struct MCSchedClassDesc;
+class raw_ostream;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
+// Keep track of physreg data dependencies by recording each live register unit.
+// Associate each regunit with an instruction operand. Depending on the
+// direction instructions are scanned, it could be the operand that defined the
+// regunit, or the highest operand to read the regunit.
+struct LiveRegUnit {
+  unsigned RegUnit;
+  unsigned Cycle = 0;
+  const MachineInstr *MI = nullptr;
+  unsigned Op = 0;
+
+  unsigned getSparseSetIndex() const { return RegUnit; }
+
+  LiveRegUnit(unsigned RU) : RegUnit(RU) {}
+};
+
+class MachineTraceMetrics : public MachineFunctionPass {
+  const MachineFunction *MF = nullptr;
+  const TargetInstrInfo *TII = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+  const MachineRegisterInfo *MRI = nullptr;
+  const MachineLoopInfo *Loops = nullptr;
+  TargetSchedModel SchedModel;
+
+public:
+  friend class Ensemble;
+  friend class Trace;
+
+  class Ensemble;
+
+  static char ID;
+
+  MachineTraceMetrics();
+
+  void getAnalysisUsage(AnalysisUsage&) const override;
+  bool runOnMachineFunction(MachineFunction&) override;
+  void releaseMemory() override;
+  void verifyAnalysis() const override;
+
+  /// Per-basic block information that doesn't depend on the trace through the
+  /// block.
+  struct FixedBlockInfo {
+    /// The number of non-trivial instructions in the block.
+    /// Doesn't count PHI and COPY instructions that are likely to be removed.
+    unsigned InstrCount = ~0u;
+
+    /// True when the block contains calls.
+    bool HasCalls = false;
+
+    FixedBlockInfo() = default;
+
+    /// Returns true when resource information for this block has been computed.
+    bool hasResources() const { return InstrCount != ~0u; }
+
+    /// Invalidate resource information.
+    void invalidate() { InstrCount = ~0u; }
+  };
+
+  /// Get the fixed resource information about MBB. Compute it on demand.
+  const FixedBlockInfo *getResources(const MachineBasicBlock*);
+
+  /// Get the scaled number of cycles used per processor resource in MBB.
+  /// This is an array with SchedModel.getNumProcResourceKinds() entries.
+  /// The getResources() function above must have been called first.
+  ///
+  /// These numbers have already been scaled by SchedModel.getResourceFactor().
+  ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
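+
+  // For illustration (hypothetical caller; MTM is a MachineTraceMetrics
+  // analysis result):
+  //
+  //   const FixedBlockInfo *FBI = MTM.getResources(MBB); // Computed on demand.
+  //   unsigned NonTrivialInstrs = FBI->InstrCount;
+  //   ArrayRef<unsigned> PRCycles =
+  //       MTM.getProcResourceCycles(MBB->getNumber());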
+
+  /// A virtual register or regunit required by a basic block or its trace
+  /// successors.
+  struct LiveInReg {
+    /// The virtual register required, or a register unit.
+    unsigned Reg;
+
+    /// For virtual registers: Minimum height of the defining instruction.
+    /// For regunits: Height of the highest user in the trace.
+    unsigned Height;
+
+    LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
+  };
+
+  /// Per-basic block information that relates to a specific trace through the
+  /// block. Because traces are convergent, only one of these is required per
+  /// block in a trace ensemble.
+  struct TraceBlockInfo {
+    /// Trace predecessor, or NULL for the first block in the trace.
+    /// Valid when hasValidDepth().
+    const MachineBasicBlock *Pred = nullptr;
+
+    /// Trace successor, or NULL for the last block in the trace.
+    /// Valid when hasValidHeight().
+    const MachineBasicBlock *Succ = nullptr;
+
+    /// The block number of the head of the trace. (When hasValidDepth()).
+    unsigned Head;
+
+    /// The block number of the tail of the trace. (When hasValidHeight()).
+    unsigned Tail;
+
+    /// Accumulated number of instructions in the trace above this block.
+    /// Does not include instructions in this block.
+    unsigned InstrDepth = ~0u;
+
+    /// Accumulated number of instructions in the trace below this block.
+    /// Includes instructions in this block.
+    unsigned InstrHeight = ~0u;
+
+    TraceBlockInfo() = default;
+
+    /// Returns true if the depth resources have been computed from the trace
+    /// above this block.
+    bool hasValidDepth() const { return InstrDepth != ~0u; }
+
+    /// Returns true if the height resources have been computed from the trace
+    /// below this block.
+    bool hasValidHeight() const { return InstrHeight != ~0u; }
+
+    /// Invalidate depth resources when some block above this one has changed.
+    void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
+
+    /// Invalidate height resources when a block below this one has changed.
+    void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
+
+    /// Assuming that this is a dominator of TBI, determine if it contains
+    /// useful instruction depths. A dominating block can be above the current
+    /// trace head, and any dependencies from such a far away dominator are not
+    /// expected to affect the critical path.
+    ///
+    /// Also returns true when TBI == this.
+    bool isUsefulDominator(const TraceBlockInfo &TBI) const {
+      // The trace for TBI may not even be calculated yet.
+      if (!hasValidDepth() || !TBI.hasValidDepth())
+        return false;
+      // Instruction depths are only comparable if the traces share a head.
+      if (Head != TBI.Head)
+        return false;
+      // It is almost always the case that TBI belongs to the same trace as
+      // this block, but in rare convoluted cases involving irreducible control
+      // flow, a dominator may share a trace head without actually being on the
+      // same trace as TBI. This is not a big problem as long as it doesn't
+      // increase the instruction depth.
+      return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
+    }
+
+    // Data-dependency-related information. Per-instruction depth and height
+    // are computed from data dependencies in the current trace, using
+    // itinerary data.
+
+    /// Instruction depths have been computed. This implies hasValidDepth().
+    bool HasValidInstrDepths = false;
+
+    /// Instruction heights have been computed. This implies hasValidHeight().
+    bool HasValidInstrHeights = false;
+
+    /// Critical path length. This is the number of cycles in the longest data
+    /// dependency chain through the trace. This is only valid when both
+    /// HasValidInstrDepths and HasValidInstrHeights are set.
+    unsigned CriticalPath;
+
+    /// Live-in registers. These registers are defined above the current block
+    /// and used by this block or a block below it.
+    /// This does not include PHI uses in the current block, but it does
+    /// include PHI uses in deeper blocks.
+    SmallVector<LiveInReg, 4> LiveIns;
+
+    void print(raw_ostream&) const;
+  };
+
+  /// InstrCycles represents the cycle height and depth of an instruction in a
+  /// trace.
+  struct InstrCycles {
+    /// Earliest issue cycle as determined by data dependencies and instruction
+    /// latencies from the beginning of the trace. Data dependencies from
+    /// before the trace are not included.
+    unsigned Depth;
+
+    /// Minimum number of cycles from when this instruction is issued to the
+    /// end of the trace, as determined by data dependencies and instruction
+    /// latencies.
+    unsigned Height;
+  };
+
+  /// A trace represents a plausible sequence of executed basic blocks that
+  /// passes through the current basic block. The Trace class serves as a
+  /// handle to internal cached data structures.
+  class Trace {
+    Ensemble &TE;
+    TraceBlockInfo &TBI;
+
+    unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
+
+  public:
+    explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
+
+    void print(raw_ostream&) const;
+
+    /// Compute the total number of instructions in the trace.
+    unsigned getInstrCount() const {
+      return TBI.InstrDepth + TBI.InstrHeight;
+    }
+
+    /// Return the resource depth of the top/bottom of the trace center block.
+    /// This is the number of cycles required to execute all instructions from
+    /// the trace head to the trace center block. The resource depth only
+    /// considers execution resources, it ignores data dependencies.
+    /// When Bottom is set, instructions in the trace center block are included.
+    unsigned getResourceDepth(bool Bottom) const;
+
+    /// Return the resource length of the trace. This is the number of cycles
+    /// required to execute the instructions in the trace if they were all
+    /// independent, exposing the maximum instruction-level parallelism.
+    ///
+    /// Any blocks in Extrablocks are included as if they were part of the
+    /// trace. Likewise, extra resources required by the specified scheduling
+    /// classes are included. For the caller to account for extra machine
+    /// instructions, it must first resolve each instruction's scheduling class.
+    unsigned getResourceLength(
+        ArrayRef<const MachineBasicBlock *> Extrablocks = None,
+        ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None,
+        ArrayRef<const MCSchedClassDesc *> RemoveInstrs = None) const;
+
+    /// Return the length of the (data dependency) critical path through the
+    /// trace.
+    unsigned getCriticalPath() const { return TBI.CriticalPath; }
+
+    /// Return the depth and height of MI. The depth is only valid for
+    /// instructions in or above the trace center block. The height is only
+    /// valid for instructions in or below the trace center block.
+    InstrCycles getInstrCycles(const MachineInstr &MI) const {
+      return TE.Cycles.lookup(&MI);
+    }
+
+    /// Return the slack of MI. This is the number of cycles MI can be delayed
+    /// before the critical path becomes longer.
+    /// MI must be an instruction in the trace center block.
+    unsigned getInstrSlack(const MachineInstr &MI) const;
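+
+    // Sketch of how depth, height, and critical path relate (illustrative,
+    // not from the original header); MI is assumed to be in the trace center
+    // block and T to be the current Trace:
+    //   InstrCycles Cyc = T.getInstrCycles(MI);
+    //   // Cyc.Depth + Cyc.Height is MI's dependency chain through the trace,
+    //   // so its slack is roughly T.getCriticalPath() - (Cyc.Depth + Cyc.Height).
+    //   unsigned Slack = T.getInstrSlack(MI);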
+
+    /// Return the Depth of a PHI instruction in a trace center block successor.
+    /// The PHI does not have to be part of the trace.
+    unsigned getPHIDepth(const MachineInstr &PHI) const;
+
+    /// A dependence is useful if the basic block of the defining instruction
+    /// is part of the trace of the user instruction. It is assumed that DefMI
+    /// dominates UseMI (see also isUsefulDominator).
+    bool isDepInTrace(const MachineInstr &DefMI,
+                      const MachineInstr &UseMI) const;
+  };
+
+  /// A trace ensemble is a collection of traces selected using the same
+  /// strategy, for example 'minimum resource height'. There is one trace for
+  /// every block in the function.
+  class Ensemble {
+    friend class Trace;
+
+    SmallVector<TraceBlockInfo, 4> BlockInfo;
+    DenseMap<const MachineInstr*, InstrCycles> Cycles;
+    SmallVector<unsigned, 0> ProcResourceDepths;
+    SmallVector<unsigned, 0> ProcResourceHeights;
+
+    void computeTrace(const MachineBasicBlock*);
+    void computeDepthResources(const MachineBasicBlock*);
+    void computeHeightResources(const MachineBasicBlock*);
+    unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
+    void computeInstrDepths(const MachineBasicBlock*);
+    void computeInstrHeights(const MachineBasicBlock*);
+    void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
+                    ArrayRef<const MachineBasicBlock*> Trace);
+
+  protected:
+    MachineTraceMetrics &MTM;
+
+    explicit Ensemble(MachineTraceMetrics*);
+
+    virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
+    virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
+    const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
+    const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
+    const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
+    ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
+    ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
+
+  public:
+    virtual ~Ensemble();
+
+    virtual const char *getName() const = 0;
+    void print(raw_ostream&) const;
+    void invalidate(const MachineBasicBlock *MBB);
+    void verify() const;
+
+    /// Get the trace that passes through MBB.
+    /// The trace is computed on demand.
+    Trace getTrace(const MachineBasicBlock *MBB);
+
+    /// Updates the depth of a machine instruction, given RegUnits.
+    void updateDepth(TraceBlockInfo &TBI, const MachineInstr&,
+                     SparseSet<LiveRegUnit> &RegUnits);
+    void updateDepth(const MachineBasicBlock *, const MachineInstr&,
+                     SparseSet<LiveRegUnit> &RegUnits);
+
+    /// Updates the depth of the instructions from Start to End.
+    void updateDepths(MachineBasicBlock::iterator Start,
+                      MachineBasicBlock::iterator End,
+                      SparseSet<LiveRegUnit> &RegUnits);
+
+  };
+
+  /// Strategies for selecting traces.
+  enum Strategy {
+    /// Select the trace through a block that has the fewest instructions.
+    TS_MinInstrCount,
+
+    TS_NumStrategies
+  };
+
+  /// Get the trace ensemble representing the given trace selection strategy.
+  /// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
+  /// and valid for the lifetime of the analysis pass.
+  Ensemble *getEnsemble(Strategy);
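+
+  // Typical client pattern (an illustrative sketch, not from the original
+  // header); MTM is the analysis result and MBB a block of interest:
+  //   MachineTraceMetrics::Ensemble *E =
+  //       MTM.getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+  //   MachineTraceMetrics::Trace T = E->getTrace(MBB);
+  //   unsigned Len = T.getResourceLength();
+  //   unsigned Crit = T.getCriticalPath();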
+
+  /// Invalidate cached information about MBB. This must be called *before* MBB
+  /// is erased, or the CFG is otherwise changed.
+  ///
+  /// This invalidates per-block information about resource usage for MBB only,
+  /// and it invalidates per-trace information for any trace that passes
+  /// through MBB.
+  ///
+  /// Call Ensemble::getTrace() again to update any trace handles.
+  void invalidate(const MachineBasicBlock *MBB);
+
+private:
+  // One entry per basic block, indexed by block number.
+  SmallVector<FixedBlockInfo, 4> BlockInfo;
+
+  // Cycles consumed on each processor resource per block.
+  // The number of processor resource kinds is constant for a given subtarget,
+  // but it is not known at compile time. The number of cycles consumed by
+  // block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
+  // where Kinds = SchedModel.getNumProcResourceKinds().
+  SmallVector<unsigned, 0> ProcResourceCycles;
+
+  // One ensemble per strategy.
+  Ensemble* Ensembles[TS_NumStrategies];
+
+  // Convert scaled resource usage to a cycle count that can be compared with
+  // latencies.
+  unsigned getCycles(unsigned Scaled) {
+    unsigned Factor = SchedModel.getLatencyFactor();
+    return (Scaled + Factor - 1) / Factor;
+  }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineTraceMetrics::Trace &Tr) {
+  Tr.print(OS);
+  return OS;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+                               const MachineTraceMetrics::Ensemble &En) {
+  En.print(OS);
+  return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINETRACEMETRICS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
new file mode 100644
index 0000000..dc105fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/MacroFusion.h
@@ -0,0 +1,50 @@
+//===- MacroFusion.h - Macro Fusion -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file contains the definition of the DAG scheduling mutation to
+/// pair instructions back to back.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACROFUSION_H
+#define LLVM_CODEGEN_MACROFUSION_H
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class ScheduleDAGMutation;
+class TargetInstrInfo;
+class TargetSubtargetInfo;
+
+/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
+/// together. Given SecondMI, when FirstMI is unspecified, check whether
+/// SecondMI may be part of a fused pair at all.
+using ShouldSchedulePredTy = std::function<bool(const TargetInstrInfo &TII,
+                                                const TargetSubtargetInfo &TSI,
+                                                const MachineInstr *FirstMI,
+                                                const MachineInstr &SecondMI)>;
+
+/// \brief Create a DAG scheduling mutation to pair instructions back to back
+/// for instructions that benefit according to the target-specific
+/// shouldScheduleAdjacent predicate function.
+std::unique_ptr<ScheduleDAGMutation>
+createMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
+
+/// \brief Create a DAG scheduling mutation to pair branch instructions with one
+/// of their predecessors back to back for instructions that benefit according
+/// to the target-specific shouldScheduleAdjacent predicate function.
+std::unique_ptr<ScheduleDAGMutation>
+createBranchMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
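+
+// An illustrative sketch of how a target might use this API. The predicate
+// body (isCompare, isFlagBranch) and the scheduler wiring are assumptions,
+// not part of this header:
+//   static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
+//                                      const TargetSubtargetInfo &TSI,
+//                                      const MachineInstr *FirstMI,
+//                                      const MachineInstr &SecondMI) {
+//     // FirstMI may be null, meaning: could SecondMI fuse with anything?
+//     if (!FirstMI)
+//       return isFlagBranch(SecondMI);
+//     // E.g. fuse a compare with the branch that consumes its flags.
+//     return isCompare(*FirstMI) && isFlagBranch(SecondMI);
+//   }
+//   ...
+//   DAG->addMutation(createMacroFusionDAGMutation(shouldScheduleAdjacent));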
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACROFUSION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h
new file mode 100644
index 0000000..bde451a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/CostAllocator.h
@@ -0,0 +1,135 @@
+//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines classes conforming to the PBQP cost value manager concept.
+//
+// Cost value managers are memory managers for PBQP cost values (vectors and
+// matrices). Since PBQP graphs can grow very large (e.g. hundreds of thousands
+// of edges on the largest functions in SPEC2006), cost values are uniqued and
+// shared rather than stored separately per node and edge.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+namespace PBQP {
+
+template <typename ValueT> class ValuePool {
+public:
+  using PoolRef = std::shared_ptr<const ValueT>;
+
+private:
+  class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
+  public:
+    template <typename ValueKeyT>
+    PoolEntry(ValuePool &Pool, ValueKeyT Value)
+        : Pool(Pool), Value(std::move(Value)) {}
+
+    ~PoolEntry() { Pool.removeEntry(this); }
+
+    const ValueT &getValue() const { return Value; }
+
+  private:
+    ValuePool &Pool;
+    ValueT Value;
+  };
+
+  class PoolEntryDSInfo {
+  public:
+    static inline PoolEntry *getEmptyKey() { return nullptr; }
+
+    static inline PoolEntry *getTombstoneKey() {
+      return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
+    }
+
+    template <typename ValueKeyT>
+    static unsigned getHashValue(const ValueKeyT &C) {
+      return hash_value(C);
+    }
+
+    static unsigned getHashValue(PoolEntry *P) {
+      return getHashValue(P->getValue());
+    }
+
+    static unsigned getHashValue(const PoolEntry *P) {
+      return getHashValue(P->getValue());
+    }
+
+    template <typename ValueKeyT1, typename ValueKeyT2>
+    static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
+      return C1 == C2;
+    }
+
+    template <typename ValueKeyT>
+    static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
+      if (P == getEmptyKey() || P == getTombstoneKey())
+        return false;
+      return isEqual(C, P->getValue());
+    }
+
+    static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
+      if (P1 == getEmptyKey() || P1 == getTombstoneKey())
+        return P1 == P2;
+      return isEqual(P1->getValue(), P2);
+    }
+  };
+
+  using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
+
+  EntrySetT EntrySet;
+
+  void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
+
+public:
+  template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
+    typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
+
+    if (I != EntrySet.end())
+      return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
+
+    auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
+    EntrySet.insert(P.get());
+    // Read the value address before handing P to the aliasing constructor:
+    // with an rvalue aliasing overload (C++20) the unspecified argument
+    // evaluation order could otherwise observe P already moved-from.
+    const ValueT *ValPtr = &P->getValue();
+    return PoolRef(std::move(P), ValPtr);
+  }
+};
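+
+// Illustrative behavior sketch (not part of the original header): requesting
+// an equal value twice yields the same pooled entry. The std::string key is
+// an assumption for the example (llvm::hash_value for std::string comes from
+// ADT/Hashing.h):
+//   ValuePool<std::string> Pool;
+//   auto A = Pool.getValue(std::string("cost"));
+//   auto B = Pool.getValue(std::string("cost"));
+//   assert(A.get() == B.get() && "Equal values share one entry");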
+
+template <typename VectorT, typename MatrixT> class PoolCostAllocator {
+private:
+  using VectorCostPool = ValuePool<VectorT>;
+  using MatrixCostPool = ValuePool<MatrixT>;
+
+public:
+  using Vector = VectorT;
+  using Matrix = MatrixT;
+  using VectorPtr = typename VectorCostPool::PoolRef;
+  using MatrixPtr = typename MatrixCostPool::PoolRef;
+
+  template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
+    return VectorPool.getValue(std::move(v));
+  }
+
+  template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
+    return MatrixPool.getValue(std::move(m));
+  }
+
+private:
+  VectorCostPool VectorPool;
+  MatrixCostPool MatrixPool;
+};
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
new file mode 100644
index 0000000..e94878c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Graph.h
@@ -0,0 +1,675 @@
+//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Graph class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
+#define LLVM_CODEGEN_PBQP_GRAPH_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <limits>
+#include <vector>
+
+namespace llvm {
+namespace PBQP {
+
+  class GraphBase {
+  public:
+    using NodeId = unsigned;
+    using EdgeId = unsigned;
+
+    /// @brief Returns a value representing an invalid (non-existent) node.
+    static NodeId invalidNodeId() {
+      return std::numeric_limits<NodeId>::max();
+    }
+
+    /// @brief Returns a value representing an invalid (non-existent) edge.
+    static EdgeId invalidEdgeId() {
+      return std::numeric_limits<EdgeId>::max();
+    }
+  };
+
+  /// PBQP Graph class.
+  /// Instances of this class describe PBQP problems.
+  ///
+  template <typename SolverT>
+  class Graph : public GraphBase {
+  private:
+    using CostAllocator = typename SolverT::CostAllocator;
+
+  public:
+    using RawVector = typename SolverT::RawVector;
+    using RawMatrix = typename SolverT::RawMatrix;
+    using Vector = typename SolverT::Vector;
+    using Matrix = typename SolverT::Matrix;
+    using VectorPtr = typename CostAllocator::VectorPtr;
+    using MatrixPtr = typename CostAllocator::MatrixPtr;
+    using NodeMetadata = typename SolverT::NodeMetadata;
+    using EdgeMetadata = typename SolverT::EdgeMetadata;
+    using GraphMetadata = typename SolverT::GraphMetadata;
+
+  private:
+    class NodeEntry {
+    public:
+      using AdjEdgeList = std::vector<EdgeId>;
+      using AdjEdgeIdx = AdjEdgeList::size_type;
+      using AdjEdgeItr = AdjEdgeList::const_iterator;
+
+      NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
+
+      static AdjEdgeIdx getInvalidAdjEdgeIdx() {
+        return std::numeric_limits<AdjEdgeIdx>::max();
+      }
+
+      AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
+        AdjEdgeIdx Idx = AdjEdgeIds.size();
+        AdjEdgeIds.push_back(EId);
+        return Idx;
+      }
+
+      void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
+        // Swap-and-pop for fast removal.
+        //   1) Update the adj index of the edge currently at back().
+        //   2) Move last Edge down to Idx.
+        //   3) pop_back()
+        // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
+        // redundant, but both operations are cheap.
+        G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
+        AdjEdgeIds[Idx] = AdjEdgeIds.back();
+        AdjEdgeIds.pop_back();
+      }
+
+      const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
+
+      VectorPtr Costs;
+      NodeMetadata Metadata;
+
+    private:
+      AdjEdgeList AdjEdgeIds;
+    };
+
+    class EdgeEntry {
+    public:
+      EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
+          : Costs(std::move(Costs)) {
+        NIds[0] = N1Id;
+        NIds[1] = N2Id;
+        ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
+        ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
+      }
+
+      void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
+        assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
+               "Edge already connected to NIds[NIdx].");
+        NodeEntry &N = G.getNode(NIds[NIdx]);
+        ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
+      }
+
+      void connect(Graph &G, EdgeId ThisEdgeId) {
+        connectToN(G, ThisEdgeId, 0);
+        connectToN(G, ThisEdgeId, 1);
+      }
+
+      void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
+        if (NId == NIds[0])
+          ThisEdgeAdjIdxs[0] = NewIdx;
+        else {
+          assert(NId == NIds[1] && "Edge not connected to NId");
+          ThisEdgeAdjIdxs[1] = NewIdx;
+        }
+      }
+
+      void disconnectFromN(Graph &G, unsigned NIdx) {
+        assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
+               "Edge not connected to NIds[NIdx].");
+        NodeEntry &N = G.getNode(NIds[NIdx]);
+        N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
+        ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
+      }
+
+      void disconnectFrom(Graph &G, NodeId NId) {
+        if (NId == NIds[0])
+          disconnectFromN(G, 0);
+        else {
+          assert(NId == NIds[1] && "Edge does not connect NId");
+          disconnectFromN(G, 1);
+        }
+      }
+
+      NodeId getN1Id() const { return NIds[0]; }
+      NodeId getN2Id() const { return NIds[1]; }
+
+      MatrixPtr Costs;
+      EdgeMetadata Metadata;
+
+    private:
+      NodeId NIds[2];
+      typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
+    };
+
+    // ----- MEMBERS -----
+
+    GraphMetadata Metadata;
+    CostAllocator CostAlloc;
+    SolverT *Solver = nullptr;
+
+    using NodeVector = std::vector<NodeEntry>;
+    using FreeNodeVector = std::vector<NodeId>;
+    NodeVector Nodes;
+    FreeNodeVector FreeNodeIds;
+
+    using EdgeVector = std::vector<EdgeEntry>;
+    using FreeEdgeVector = std::vector<EdgeId>;
+    EdgeVector Edges;
+    FreeEdgeVector FreeEdgeIds;
+
+    // Graphs are not copyable; see also the deleted operator= below.
+    Graph(const Graph &Other) = delete;
+
+    // ----- INTERNAL METHODS -----
+
+    NodeEntry &getNode(NodeId NId) {
+      assert(NId < Nodes.size() && "Out of bound NodeId");
+      return Nodes[NId];
+    }
+    const NodeEntry &getNode(NodeId NId) const {
+      assert(NId < Nodes.size() && "Out of bound NodeId");
+      return Nodes[NId];
+    }
+
+    EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
+    const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
+
+    NodeId addConstructedNode(NodeEntry N) {
+      NodeId NId = 0;
+      if (!FreeNodeIds.empty()) {
+        NId = FreeNodeIds.back();
+        FreeNodeIds.pop_back();
+        Nodes[NId] = std::move(N);
+      } else {
+        NId = Nodes.size();
+        Nodes.push_back(std::move(N));
+      }
+      return NId;
+    }
+
+    EdgeId addConstructedEdge(EdgeEntry E) {
+      assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
+             "Attempt to add duplicate edge.");
+      EdgeId EId = 0;
+      if (!FreeEdgeIds.empty()) {
+        EId = FreeEdgeIds.back();
+        FreeEdgeIds.pop_back();
+        Edges[EId] = std::move(E);
+      } else {
+        EId = Edges.size();
+        Edges.push_back(std::move(E));
+      }
+
+      EdgeEntry &NE = getEdge(EId);
+
+      // Add the edge to the adjacency sets of its nodes.
+      NE.connect(*this, EId);
+      return EId;
+    }
+
+    void operator=(const Graph &Other) = delete;
+
+  public:
+    using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
+
+    class NodeItr {
+    public:
+      using iterator_category = std::forward_iterator_tag;
+      using value_type = NodeId;
+      using difference_type = int;
+      using pointer = NodeId *;
+      using reference = NodeId &;
+
+      NodeItr(NodeId CurNId, const Graph &G)
+        : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
+        this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
+      }
+
+      bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
+      bool operator!=(const NodeItr &O) const { return !(*this == O); }
+      NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
+      NodeId operator*() const { return CurNId; }
+
+    private:
+      NodeId findNextInUse(NodeId NId) const {
+        while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
+          ++NId;
+        }
+        return NId;
+      }
+
+      NodeId CurNId, EndNId;
+      const FreeNodeVector &FreeNodeIds;
+    };
+
+    class EdgeItr {
+    public:
+      EdgeItr(EdgeId CurEId, const Graph &G)
+        : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
+        this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
+      }
+
+      bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
+      bool operator!=(const EdgeItr &O) const { return !(*this == O); }
+      EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
+      EdgeId operator*() const { return CurEId; }
+
+    private:
+      EdgeId findNextInUse(EdgeId EId) const {
+        while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
+          ++EId;
+        }
+        return EId;
+      }
+
+      EdgeId CurEId, EndEId;
+      const FreeEdgeVector &FreeEdgeIds;
+    };
+
+    class NodeIdSet {
+    public:
+      NodeIdSet(const Graph &G) : G(G) {}
+
+      NodeItr begin() const { return NodeItr(0, G); }
+      NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
+
+      bool empty() const { return size() == 0; }
+
+      typename NodeVector::size_type size() const {
+        return G.Nodes.size() - G.FreeNodeIds.size();
+      }
+
+    private:
+      const Graph& G;
+    };
+
+    class EdgeIdSet {
+    public:
+      EdgeIdSet(const Graph &G) : G(G) {}
+
+      EdgeItr begin() const { return EdgeItr(0, G); }
+      EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
+
+      bool empty() const { return size() == 0; }
+
+      typename EdgeVector::size_type size() const {
+        return G.Edges.size() - G.FreeEdgeIds.size();
+      }
+
+    private:
+      const Graph& G;
+    };
+
+    class AdjEdgeIdSet {
+    public:
+      AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
+
+      typename NodeEntry::AdjEdgeItr begin() const {
+        return NE.getAdjEdgeIds().begin();
+      }
+
+      typename NodeEntry::AdjEdgeItr end() const {
+        return NE.getAdjEdgeIds().end();
+      }
+
+      bool empty() const { return NE.getAdjEdgeIds().empty(); }
+
+      typename NodeEntry::AdjEdgeList::size_type size() const {
+        return NE.getAdjEdgeIds().size();
+      }
+
+    private:
+      const NodeEntry &NE;
+    };
+
+    /// @brief Construct an empty PBQP graph.
+    Graph() = default;
+
+    /// @brief Construct an empty PBQP graph with the given graph metadata.
+    Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
+
+    /// @brief Get a reference to the graph metadata.
+    GraphMetadata& getMetadata() { return Metadata; }
+
+    /// @brief Get a const-reference to the graph metadata.
+    const GraphMetadata& getMetadata() const { return Metadata; }
+
+    /// @brief Lock this graph to the given solver instance in preparation
+    /// for running the solver. This method will call solver.handleAddNode for
+    /// each node in the graph, and handleAddEdge for each edge, to give the
+    /// solver an opportunity to set up any required metadata.
+    void setSolver(SolverT &S) {
+      assert(!Solver && "Solver already set. Call unsetSolver().");
+      Solver = &S;
+      for (auto NId : nodeIds())
+        Solver->handleAddNode(NId);
+      for (auto EId : edgeIds())
+        Solver->handleAddEdge(EId);
+    }
+
+    /// @brief Release from solver instance.
+    void unsetSolver() {
+      assert(Solver && "Solver not set.");
+      Solver = nullptr;
+    }
+
+    /// @brief Add a node with the given costs.
+    /// @param Costs Cost vector for the new node.
+    /// @return Node id for the added node.
+    template <typename OtherVectorT>
+    NodeId addNode(OtherVectorT Costs) {
+      // Get cost vector from the problem domain
+      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+      NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
+      if (Solver)
+        Solver->handleAddNode(NId);
+      return NId;
+    }
+
+    /// @brief Add a node bypassing the cost allocator.
+    /// @param Costs Cost vector ptr for the new node (must be convertible to
+    ///        VectorPtr).
+    /// @return Node id for the added node.
+    ///
+    ///   This method allows for fast addition of a node whose costs don't need
+    /// to be passed through the cost allocator. The most common use case for
+    /// this is when duplicating costs from an existing node (when using a
+    /// pooling allocator). These have already been uniqued, so we can avoid
+    /// re-constructing and re-uniquing them by attaching them directly to the
+    /// new node.
+    template <typename OtherVectorPtrT>
+    NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
+      NodeId NId = addConstructedNode(NodeEntry(Costs));
+      if (Solver)
+        Solver->handleAddNode(NId);
+      return NId;
+    }
+
+    /// @brief Add an edge between the given nodes with the given costs.
+    /// @param N1Id First node.
+    /// @param N2Id Second node.
+    /// @param Costs Cost matrix for new edge.
+    /// @return Edge id for the added edge.
+    template <typename OtherMatrixT>
+    EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherMatrixT Costs) {
+      assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
+             getNodeCosts(N2Id).getLength() == Costs.getCols() &&
+             "Matrix dimensions mismatch.");
+      // Get cost matrix from the problem domain.
+      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
+      if (Solver)
+        Solver->handleAddEdge(EId);
+      return EId;
+    }
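+
+    // Dimension sketch (illustrative; Vector and Matrix here stand for the
+    // solver's cost types): a node with a 3-entry cost vector and one with
+    // 2 entries must be joined by a 3x2 edge matrix:
+    //   NodeId N1 = G.addNode(Vector(3, 0));
+    //   NodeId N2 = G.addNode(Vector(2, 0));
+    //   EdgeId E  = G.addEdge(N1, N2, Matrix(3, 2, 0));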
+
+    /// @brief Add an edge bypassing the cost allocator.
+    /// @param N1Id First node.
+    /// @param N2Id Second node.
+    /// @param Costs Cost matrix for new edge.
+    /// @return Edge id for the added edge.
+    ///
+    ///   This method allows for fast addition of an edge whose costs don't need
+    /// to be passed through the cost allocator. The most common use case for
+    /// this is when duplicating costs from an existing edge (when using a
+    /// pooling allocator). These have already been uniqued, so we can avoid
+    /// re-constructing and re-uniquing them by attaching them directly to the
+    /// new edge.
+    template <typename OtherMatrixPtrT>
+    EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
+                                         OtherMatrixPtrT Costs) {
+      assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
+             getNodeCosts(N2Id).getLength() == Costs->getCols() &&
+             "Matrix dimensions mismatch.");
+      // Get cost matrix from the problem domain.
+      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
+      if (Solver)
+        Solver->handleAddEdge(EId);
+      return EId;
+    }
+
+    /// @brief Returns true if the graph is empty.
+    bool empty() const { return NodeIdSet(*this).empty(); }
+
+    NodeIdSet nodeIds() const { return NodeIdSet(*this); }
+    EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
+
+    AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
+
+    /// @brief Get the number of nodes in the graph.
+    /// @return Number of nodes in the graph.
+    unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
+
+    /// @brief Get the number of edges in the graph.
+    /// @return Number of edges in the graph.
+    unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
+
+    /// @brief Set a node's cost vector.
+    /// @param NId Node to update.
+    /// @param Costs New costs to set.
+    template <typename OtherVectorT>
+    void setNodeCosts(NodeId NId, OtherVectorT Costs) {
+      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+      if (Solver)
+        Solver->handleSetNodeCosts(NId, *AllocatedCosts);
+      getNode(NId).Costs = AllocatedCosts;
+    }
+
+    /// @brief Get a VectorPtr to a node's cost vector. Rarely useful - use
+    ///        getNodeCosts where possible.
+    /// @param NId Node id.
+    /// @return VectorPtr to node cost vector.
+    ///
+    ///   This method is primarily useful for duplicating costs quickly by
+    /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
+    /// getNodeCosts when dealing with node cost values.
+    const VectorPtr& getNodeCostsPtr(NodeId NId) const {
+      return getNode(NId).Costs;
+    }
+
+    /// @brief Get a node's cost vector.
+    /// @param NId Node id.
+    /// @return Node cost vector.
+    const Vector& getNodeCosts(NodeId NId) const {
+      return *getNodeCostsPtr(NId);
+    }
+
+    NodeMetadata& getNodeMetadata(NodeId NId) {
+      return getNode(NId).Metadata;
+    }
+
+    const NodeMetadata& getNodeMetadata(NodeId NId) const {
+      return getNode(NId).Metadata;
+    }
+
+    typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
+      return getNode(NId).getAdjEdgeIds().size();
+    }
+
+    /// @brief Update an edge's cost matrix.
+    /// @param EId Edge id.
+    /// @param Costs New cost matrix.
+    template <typename OtherMatrixT>
+    void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
+      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+      if (Solver)
+        Solver->handleUpdateCosts(EId, *AllocatedCosts);
+      getEdge(EId).Costs = AllocatedCosts;
+    }
+
+    /// @brief Get a MatrixPtr to a node's cost matrix. Rarely useful - use
+    ///        getEdgeCosts where possible.
+    /// @param EId Edge id.
+    /// @return MatrixPtr to edge cost matrix.
+    ///
+    ///   This method is primarily useful for duplicating costs quickly by
+    /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
+    /// getEdgeCosts when dealing with edge cost values.
+    const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
+      return getEdge(EId).Costs;
+    }
+
+    /// @brief Get an edge's cost matrix.
+    /// @param EId Edge id.
+    /// @return Edge cost matrix.
+    const Matrix& getEdgeCosts(EdgeId EId) const {
+      return *getEdge(EId).Costs;
+    }
+
+    EdgeMetadata& getEdgeMetadata(EdgeId EId) {
+      return getEdge(EId).Metadata;
+    }
+
+    const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
+      return getEdge(EId).Metadata;
+    }
+
+    /// @brief Get the first node connected to this edge.
+    /// @param EId Edge id.
+    /// @return The first node connected to the given edge.
+    NodeId getEdgeNode1Id(EdgeId EId) const {
+      return getEdge(EId).getN1Id();
+    }
+
+    /// @brief Get the second node connected to this edge.
+    /// @param EId Edge id.
+    /// @return The second node connected to the given edge.
+    NodeId getEdgeNode2Id(EdgeId EId) const {
+      return getEdge(EId).getN2Id();
+    }
+
+    /// @brief Get the "other" node connected to this edge.
+    /// @param EId Edge id.
+    /// @param NId Node id for the "given" node.
+    /// @return The id of the "other" node connected to this edge.
+    NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
+      EdgeEntry &E = getEdge(EId);
+      if (E.getN1Id() == NId) {
+        return E.getN2Id();
+      } // else
+      return E.getN1Id();
+    }
+
+    /// @brief Get the edge connecting two nodes.
+    /// @param N1Id First node id.
+    /// @param N2Id Second node id.
+    /// @return An id for edge (N1Id, N2Id) if such an edge exists,
+    ///         otherwise returns an invalid edge id.
+    EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
+      for (auto AEId : adjEdgeIds(N1Id)) {
+        if ((getEdgeNode1Id(AEId) == N2Id) ||
+            (getEdgeNode2Id(AEId) == N2Id)) {
+          return AEId;
+        }
+      }
+      return invalidEdgeId();
+    }
+
+    /// @brief Remove a node from the graph.
+    /// @param NId Node id.
+    void removeNode(NodeId NId) {
+      if (Solver)
+        Solver->handleRemoveNode(NId);
+      NodeEntry &N = getNode(NId);
+      // removeEdge() mutates the node's adjacency list while we walk it, so
+      // iterate over a copy of the edge ids.
+      typename NodeEntry::AdjEdgeList AdjEdgeIds(N.getAdjEdgeIds());
+      for (EdgeId EId : AdjEdgeIds)
+        removeEdge(EId);
+      FreeNodeIds.push_back(NId);
+    }
+
+    /// @brief Disconnect an edge from the given node.
+    ///
+    /// Removes the given edge from the adjacency list of the given node.
+    /// This operation leaves the edge in an 'asymmetric' state: It will no
+    /// longer appear in an iteration over the given node's (NId's) edges, but
+    /// will appear in an iteration over the 'other', unnamed node's edges.
+    ///
+    /// This does not correspond to any normal graph operation, but exists to
+    /// support efficient PBQP graph-reduction based solvers. It is used to
+    /// 'effectively' remove the unnamed node from the graph while the solver
+    /// is performing the reduction. The solver will later call reconnectNode
+    /// to restore the edge in the named node's adjacency list.
+    ///
+    /// Since the degree of a node is the number of connected edges,
+    /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
+    /// drop by 1.
+    ///
+    /// A disconnected edge WILL still appear in an iteration over the graph
+    /// edges.
+    ///
+    /// A disconnected edge should not be removed from the graph, it should be
+    /// reconnected first.
+    ///
+    /// A disconnected edge can be reconnected by calling the reconnectEdge
+    /// method.
+    void disconnectEdge(EdgeId EId, NodeId NId) {
+      if (Solver)
+        Solver->handleDisconnectEdge(EId, NId);
+
+      EdgeEntry &E = getEdge(EId);
+      E.disconnectFrom(*this, NId);
+    }
+
+    /// @brief Convenience method to disconnect all neighbours from the given
+    ///        node.
+    void disconnectAllNeighborsFromNode(NodeId NId) {
+      for (auto AEId : adjEdgeIds(NId))
+        disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
+    }
+
+    /// @brief Re-attach an edge to its nodes.
+    ///
+    /// Adds an edge that had been previously disconnected back into the
+    /// adjacency set of the nodes that the edge connects.
+    void reconnectEdge(EdgeId EId, NodeId NId) {
+      EdgeEntry &E = getEdge(EId);
+      assert((NId == E.getN1Id() || NId == E.getN2Id()) &&
+             "Edge does not connect NId");
+      // EdgeEntry only exposes an index-based connect helper, so map the
+      // node id to its endpoint index here.
+      E.connectToN(*this, EId, NId == E.getN1Id() ? 0 : 1);
+      if (Solver)
+        Solver->handleReconnectEdge(EId, NId);
+    }
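+
+    // Reduction-solver protocol sketch (illustrative, not from the original
+    // header): hide a node's edge during reduction, then restore it while
+    // backpropagating a solution:
+    //   G.disconnectEdge(EId, OtherNId);  // OtherNId's adjacency list only
+    //   ... reduce and solve the remaining graph ...
+    //   G.reconnectEdge(EId, OtherNId);   // restore the adjacency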
+
+    /// @brief Remove an edge from the graph.
+    /// @param EId Edge id.
+    void removeEdge(EdgeId EId) {
+      if (Solver)
+        Solver->handleRemoveEdge(EId);
+      EdgeEntry &E = getEdge(EId);
+      // Detach the edge from both endpoints' adjacency lists, then recycle
+      // its id via the free list (EdgeItr skips ids on the free list).
+      E.disconnectFrom(*this, E.getN1Id());
+      E.disconnectFrom(*this, E.getN2Id());
+      FreeEdgeIds.push_back(EId);
+    }
+
+    /// @brief Remove all nodes and edges from the graph.
+    void clear() {
+      Nodes.clear();
+      FreeNodeIds.clear();
+      Edges.clear();
+      FreeEdgeIds.clear();
+    }
+  };
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_GRAPH_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
new file mode 100644
index 0000000..ba405e8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Math.h
@@ -0,0 +1,292 @@
+//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_MATH_H
+#define LLVM_CODEGEN_PBQP_MATH_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <memory>
+
+namespace llvm {
+namespace PBQP {
+
+using PBQPNum = float;
+
+/// \brief PBQP Vector class.
+class Vector {
+  friend hash_code hash_value(const Vector &);
+
+public:
+  /// \brief Construct a PBQP vector of the given size.
+  explicit Vector(unsigned Length)
+    : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
+
+  /// \brief Construct a PBQP vector with initializer.
+  Vector(unsigned Length, PBQPNum InitVal)
+    : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+    std::fill(Data.get(), Data.get() + Length, InitVal);
+  }
+
+  /// \brief Copy construct a PBQP vector.
+  Vector(const Vector &V)
+    : Length(V.Length), Data(llvm::make_unique<PBQPNum []>(Length)) {
+    std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
+  }
+
+  /// \brief Move construct a PBQP vector.
+  Vector(Vector &&V)
+    : Length(V.Length), Data(std::move(V.Data)) {
+    V.Length = 0;
+  }
+
+  /// \brief Comparison operator.
+  bool operator==(const Vector &V) const {
+    assert(Length != 0 && Data && "Invalid vector");
+    if (Length != V.Length)
+      return false;
+    return std::equal(Data.get(), Data.get() + Length, V.Data.get());
+  }
+
+  /// \brief Return the length of the vector.
+  unsigned getLength() const {
+    assert(Length != 0 && Data && "Invalid vector");
+    return Length;
+  }
+
+  /// \brief Element access.
+  PBQPNum& operator[](unsigned Index) {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Index < Length && "Vector element access out of bounds.");
+    return Data[Index];
+  }
+
+  /// \brief Const element access.
+  const PBQPNum& operator[](unsigned Index) const {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Index < Length && "Vector element access out of bounds.");
+    return Data[Index];
+  }
+
+  /// \brief Add another vector to this one.
+  Vector& operator+=(const Vector &V) {
+    assert(Length != 0 && Data && "Invalid vector");
+    assert(Length == V.Length && "Vector length mismatch.");
+    std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
+                   std::plus<PBQPNum>());
+    return *this;
+  }
+
+  /// \brief Returns the index of the minimum value in this vector
+  unsigned minIndex() const {
+    assert(Length != 0 && Data && "Invalid vector");
+    return std::min_element(Data.get(), Data.get() + Length) - Data.get();
+  }
+
+private:
+  unsigned Length;
+  std::unique_ptr<PBQPNum []> Data;
+};
+
+/// \brief Return a hash_code for the given vector.
+inline hash_code hash_value(const Vector &V) {
+  unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
+  unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
+  return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
+}
+
+/// \brief Output a textual representation of the given vector on the given
+///        output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Vector &V) {
+  assert((V.getLength() != 0) && "Zero-length vector badness.");
+
+  OS << "[ " << V[0];
+  for (unsigned i = 1; i < V.getLength(); ++i)
+    OS << ", " << V[i];
+  OS << " ]";
+
+  return OS;
+}
+
+/// \brief PBQP Matrix class
+class Matrix {
+private:
+  friend hash_code hash_value(const Matrix &);
+
+public:
+  /// \brief Construct a PBQP Matrix with the given dimensions.
+  Matrix(unsigned Rows, unsigned Cols) :
+    Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+  }
+
+  /// \brief Construct a PBQP Matrix with the given dimensions and initial
+  /// value.
+  Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
+    : Rows(Rows), Cols(Cols),
+      Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+    std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
+  }
+
+  /// \brief Copy construct a PBQP matrix.
+  Matrix(const Matrix &M)
+    : Rows(M.Rows), Cols(M.Cols),
+      Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
+    std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
+  }
+
+  /// \brief Move construct a PBQP matrix.
+  Matrix(Matrix &&M)
+    : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
+    M.Rows = M.Cols = 0;
+  }
+
+  /// \brief Comparison operator.
+  bool operator==(const Matrix &M) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    if (Rows != M.Rows || Cols != M.Cols)
+      return false;
+    return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
+  }
+
+  /// \brief Return the number of rows in this matrix.
+  unsigned getRows() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    return Rows;
+  }
+
+  /// \brief Return the number of cols in this matrix.
+  unsigned getCols() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    return Cols;
+  }
+
+  /// \brief Matrix element access.
+  PBQPNum* operator[](unsigned R) {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(R < Rows && "Row out of bounds.");
+    return Data.get() + (R * Cols);
+  }
+
+  /// \brief Matrix element access.
+  const PBQPNum* operator[](unsigned R) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(R < Rows && "Row out of bounds.");
+    return Data.get() + (R * Cols);
+  }
+
+  /// \brief Returns the given row as a vector.
+  Vector getRowAsVector(unsigned R) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Vector V(Cols);
+    for (unsigned C = 0; C < Cols; ++C)
+      V[C] = (*this)[R][C];
+    return V;
+  }
+
+  /// \brief Returns the given column as a vector.
+  Vector getColAsVector(unsigned C) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Vector V(Rows);
+    for (unsigned R = 0; R < Rows; ++R)
+      V[R] = (*this)[R][C];
+    return V;
+  }
+
+  /// \brief Matrix transpose.
+  Matrix transpose() const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Matrix M(Cols, Rows);
+    for (unsigned r = 0; r < Rows; ++r)
+      for (unsigned c = 0; c < Cols; ++c)
+        M[c][r] = (*this)[r][c];
+    return M;
+  }
+
+  /// \brief Add the given matrix to this one.
+  Matrix& operator+=(const Matrix &M) {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    assert(Rows == M.Rows && Cols == M.Cols &&
+           "Matrix dimensions mismatch.");
+    std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
+                   Data.get(), std::plus<PBQPNum>());
+    return *this;
+  }
+
+  Matrix operator+(const Matrix &M) const {
+    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
+    Matrix Tmp(*this);
+    Tmp += M;
+    return Tmp;
+  }
+
+private:
+  unsigned Rows, Cols;
+  std::unique_ptr<PBQPNum []> Data;
+};
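+
+// A small usage sketch (illustrative, not part of the original header):
+//   Matrix M(2, 3, 0.0f);              // 2x3, zero-initialized
+//   M[1][2] = 5.0f;                    // row-major element access
+//   Vector Col = M.getColAsVector(2);  // [0, 5]
+//   Matrix MT = M.transpose();         // 3x2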
+
+/// \brief Return a hash_code for the given matrix.
+inline hash_code hash_value(const Matrix &M) {
+  unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
+  unsigned *MEnd =
+    reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
+  return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
+}
+
+/// \brief Output a textual representation of the given matrix on the given
+///        output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Matrix &M) {
+  assert((M.getRows() != 0) && "Zero-row matrix badness.");
+  for (unsigned i = 0; i < M.getRows(); ++i)
+    OS << M.getRowAsVector(i) << "\n";
+  return OS;
+}
+
+template <typename Metadata>
+class MDVector : public Vector {
+public:
+  MDVector(const Vector &v) : Vector(v), md(*this) {}
+  MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
+
+  const Metadata& getMetadata() const { return md; }
+
+private:
+  Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDVector<Metadata> &V) {
+  return hash_value(static_cast<const Vector&>(V));
+}
+
+template <typename Metadata>
+class MDMatrix : public Matrix {
+public:
+  MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
+  MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
+
+  const Metadata& getMetadata() const { return md; }
+
+private:
+  Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDMatrix<Metadata> &M) {
+  return hash_value(static_cast<const Matrix&>(M));
+}
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_MATH_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
new file mode 100644
index 0000000..8aeb519
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/ReductionRules.h
@@ -0,0 +1,223 @@
+//===- ReductionRules.h - Reduction Rules -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reduction Rules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+#define LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+
+#include "Graph.h"
+#include "Math.h"
+#include "Solution.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+namespace PBQP {
+
+  /// \brief Reduce a node of degree one.
+  ///
+  /// Propagate costs from the given node, which must be of degree one, to its
+  /// neighbor. Notify the problem domain.
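+  ///
+  /// A worked mini-example (illustrative, not from the original header): let
+  /// the degree-one node N have costs X = [0, 4], let its edge have costs
+  ///   E = [1 100]
+  ///       [9   2]
+  /// with N as the edge's first node, and let the neighbor's costs be Y.
+  /// Each neighbor option j then receives min over i of (E[i][j] + X[i]):
+  /// Y[0] += min(1+0, 9+4) = 1 and Y[1] += min(100+0, 2+4) = 6, after which
+  /// the edge is disconnected from the neighbor.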
+  template <typename GraphT>
+  void applyR1(GraphT &G, typename GraphT::NodeId NId) {
+    using NodeId = typename GraphT::NodeId;
+    using EdgeId = typename GraphT::EdgeId;
+    using Vector = typename GraphT::Vector;
+    using Matrix = typename GraphT::Matrix;
+    using RawVector = typename GraphT::RawVector;
+
+    assert(G.getNodeDegree(NId) == 1 &&
+           "R1 applied to node with degree != 1.");
+
+    EdgeId EId = *G.adjEdgeIds(NId).begin();
+    NodeId MId = G.getEdgeOtherNodeId(EId, NId);
+
+    const Matrix &ECosts = G.getEdgeCosts(EId);
+    const Vector &XCosts = G.getNodeCosts(NId);
+    RawVector YCosts = G.getNodeCosts(MId);
+
+    // Duplicate a little to avoid transposing matrices.
+    if (NId == G.getEdgeNode1Id(EId)) {
+      for (unsigned j = 0; j < YCosts.getLength(); ++j) {
+        PBQPNum Min = ECosts[0][j] + XCosts[0];
+        for (unsigned i = 1; i < XCosts.getLength(); ++i) {
+          PBQPNum C = ECosts[i][j] + XCosts[i];
+          if (C < Min)
+            Min = C;
+        }
+        YCosts[j] += Min;
+      }
+    } else {
+      for (unsigned i = 0; i < YCosts.getLength(); ++i) {
+        PBQPNum Min = ECosts[i][0] + XCosts[0];
+        for (unsigned j = 1; j < XCosts.getLength(); ++j) {
+          PBQPNum C = ECosts[i][j] + XCosts[j];
+          if (C < Min)
+            Min = C;
+        }
+        YCosts[i] += Min;
+      }
+    }
+    G.setNodeCosts(MId, YCosts);
+    G.disconnectEdge(EId, MId);
+  }
+
+  template <typename GraphT>
+  void applyR2(GraphT &G, typename GraphT::NodeId NId) {
+    using NodeId = typename GraphT::NodeId;
+    using EdgeId = typename GraphT::EdgeId;
+    using Vector = typename GraphT::Vector;
+    using Matrix = typename GraphT::Matrix;
+    using RawMatrix = typename GraphT::RawMatrix;
+
+    assert(G.getNodeDegree(NId) == 2 &&
+           "R2 applied to node with degree != 2.");
+
+    const Vector &XCosts = G.getNodeCosts(NId);
+
+    typename GraphT::AdjEdgeItr AEItr = G.adjEdgeIds(NId).begin();
+    EdgeId YXEId = *AEItr,
+           ZXEId = *(++AEItr);
+
+    NodeId YNId = G.getEdgeOtherNodeId(YXEId, NId),
+           ZNId = G.getEdgeOtherNodeId(ZXEId, NId);
+
+    bool FlipEdge1 = (G.getEdgeNode1Id(YXEId) == NId),
+         FlipEdge2 = (G.getEdgeNode1Id(ZXEId) == NId);
+
+    const Matrix *YXECosts = FlipEdge1 ?
+      new Matrix(G.getEdgeCosts(YXEId).transpose()) :
+      &G.getEdgeCosts(YXEId);
+
+    const Matrix *ZXECosts = FlipEdge2 ?
+      new Matrix(G.getEdgeCosts(ZXEId).transpose()) :
+      &G.getEdgeCosts(ZXEId);
+
+    unsigned XLen = XCosts.getLength(),
+      YLen = YXECosts->getRows(),
+      ZLen = ZXECosts->getRows();
+
+    RawMatrix Delta(YLen, ZLen);
+
+    for (unsigned i = 0; i < YLen; ++i) {
+      for (unsigned j = 0; j < ZLen; ++j) {
+        PBQPNum Min = (*YXECosts)[i][0] + (*ZXECosts)[j][0] + XCosts[0];
+        for (unsigned k = 1; k < XLen; ++k) {
+          PBQPNum C = (*YXECosts)[i][k] + (*ZXECosts)[j][k] + XCosts[k];
+          if (C < Min) {
+            Min = C;
+          }
+        }
+        Delta[i][j] = Min;
+      }
+    }
+
+    if (FlipEdge1)
+      delete YXECosts;
+
+    if (FlipEdge2)
+      delete ZXECosts;
+
+    EdgeId YZEId = G.findEdge(YNId, ZNId);
+
+    if (YZEId == G.invalidEdgeId()) {
+      YZEId = G.addEdge(YNId, ZNId, Delta);
+    } else {
+      const Matrix &YZECosts = G.getEdgeCosts(YZEId);
+      if (YNId == G.getEdgeNode1Id(YZEId)) {
+        G.updateEdgeCosts(YZEId, Delta + YZECosts);
+      } else {
+        G.updateEdgeCosts(YZEId, Delta.transpose() + YZECosts);
+      }
+    }
+
+    G.disconnectEdge(YXEId, YNId);
+    G.disconnectEdge(ZXEId, ZNId);
+
+    // TODO: Try to normalize newly added/modified edge.
+  }
+
+#ifndef NDEBUG
+  // Does this cost vector have any register options?
+  template <typename VectorT>
+  bool hasRegisterOptions(const VectorT &V) {
+    unsigned VL = V.getLength();
+
+    // An empty or spill only cost vector does not provide any register option.
+    if (VL <= 1)
+      return false;
+
+    // If there are registers in the cost vector, but all of them have
+    // infinite costs, then there is no available register.
+    for (unsigned i = 1; i < VL; ++i)
+      if (V[i] != std::numeric_limits<PBQP::PBQPNum>::infinity())
+        return true;
+
+    return false;
+  }
+#endif
+
+  // \brief Find a solution to a fully reduced graph by backpropagation.
+  //
+  // Given a graph and a reduction order, pop each node from the reduction
+  // order and greedily compute a minimum solution based on the node costs, and
+  // the dependent costs due to previously solved nodes.
+  //
+  // Note - This does not return the graph to its original (pre-reduction)
+  //        state: the existing solvers destructively alter the node and edge
+  //        costs. Given that, the backpropagate function doesn't attempt to
+  //        replace the edges either, but leaves the graph in its reduced
+  //        state.
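+  //
+  // Illustrative call pattern (the stack and node names are assumed): after
+  // reducing the graph and recording the reduction order on a stack,
+  //   Solution S = backpropagate(G, ReductionOrder);
+  //   unsigned Choice = S.getSelection(SomeNodeId);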
+  template <typename GraphT, typename StackT>
+  Solution backpropagate(GraphT& G, StackT stack) {
+    using NodeId = GraphBase::NodeId;
+    using Matrix = typename GraphT::Matrix;
+    using RawVector = typename GraphT::RawVector;
+
+    Solution s;
+
+    while (!stack.empty()) {
+      NodeId NId = stack.back();
+      stack.pop_back();
+
+      RawVector v = G.getNodeCosts(NId);
+
+#ifndef NDEBUG
+      // Although a conservatively allocatable node can be allocated to a
+      // register, spilling it may provide a lower cost solution. Assert here
+      // that spilling is done by choice, not because there were no registers
+      // available.
+      if (G.getNodeMetadata(NId).wasConservativelyAllocatable())
+        assert(hasRegisterOptions(v) && "A conservatively allocatable node "
+                                        "must have available register options");
+#endif
+
+      for (auto EId : G.adjEdgeIds(NId)) {
+        const Matrix& edgeCosts = G.getEdgeCosts(EId);
+        if (NId == G.getEdgeNode1Id(EId)) {
+          NodeId mId = G.getEdgeNode2Id(EId);
+          v += edgeCosts.getColAsVector(s.getSelection(mId));
+        } else {
+          NodeId mId = G.getEdgeNode1Id(EId);
+          v += edgeCosts.getRowAsVector(s.getSelection(mId));
+        }
+      }
+
+      s.setSelection(NId, v.minIndex());
+    }
+
+    return s;
+  }
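+
+  // Usage sketch (names here are assumptions, not part of this header):
+  // given a solver graph `G` and a reduction order produced by a reduce()
+  // step, the final selections are recovered as follows.
+  //
+  // \code
+  //   std::vector<GraphBase::NodeId> Stack = computeReductionOrder(G);
+  //   Solution S = backpropagate(G, std::move(Stack));
+  //   unsigned Sel = S.getSelection(SomeNId);
+  // \endcode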
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
new file mode 100644
index 0000000..6a24727
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQP/Solution.h
@@ -0,0 +1,56 @@
+//===- Solution.h - PBQP Solution -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Solution class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_SOLUTION_H
+#define LLVM_CODEGEN_PBQP_SOLUTION_H
+
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include <cassert>
+#include <map>
+
+namespace llvm {
+namespace PBQP {
+
+  /// \brief Represents a solution to a PBQP problem.
+  ///
+  /// To get the selection for each node in the problem use the getSelection
+  /// method.
+  class Solution {
+  private:
+    using SelectionsMap = std::map<GraphBase::NodeId, unsigned>;
+    SelectionsMap selections;
+
+  public:
+    /// \brief Initialise an empty solution.
+    Solution() = default;
+
+    /// \brief Set the selection for a given node.
+    /// @param nodeId Node id.
+    /// @param selection Selection for nodeId.
+    void setSelection(GraphBase::NodeId nodeId, unsigned selection) {
+      selections[nodeId] = selection;
+    }
+
+    /// \brief Get a node's selection.
+    /// @param nodeId Node id.
+    /// @return The selection for nodeId.
+    unsigned getSelection(GraphBase::NodeId nodeId) const {
+      SelectionsMap::const_iterator sItr = selections.find(nodeId);
+      assert(sItr != selections.end() && "No selection for node.");
+      return sItr->second;
+    }
+  };
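+
+  // Example (illustrative): recording and querying a selection. The node id
+  // is assumed to come from a PBQP graph built elsewhere.
+  //
+  // \code
+  //   Solution S;
+  //   S.setSelection(/*nodeId=*/0, /*selection=*/2);
+  //   assert(S.getSelection(0) == 2);
+  // \endcode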
+
+} // end namespace PBQP
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_SOLUTION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
new file mode 100644
index 0000000..269b7a7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PBQPRAConstraint.h
@@ -0,0 +1,71 @@
+//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPBuilder interface, for classes which build PBQP
+// instances to represent register allocation problems, and the RegAllocPBQP
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
+#define LLVM_CODEGEN_PBQPRACONSTRAINT_H
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+namespace PBQP {
+namespace RegAlloc {
+
+// Forward declare PBQP graph class.
+class PBQPRAGraph;
+
+} // end namespace RegAlloc
+} // end namespace PBQP
+
+using PBQPRAGraph = PBQP::RegAlloc::PBQPRAGraph;
+
+/// @brief Abstract base for classes implementing PBQP register allocation
+///        constraints (e.g. Spill-costs, interference, coalescing).
+class PBQPRAConstraint {
+public:
+  virtual ~PBQPRAConstraint() = 0;
+  virtual void apply(PBQPRAGraph &G) = 0;
+
+private:
+  virtual void anchor();
+};
+
+/// @brief PBQP register allocation constraint composer.
+///
+/// Constraints added to this list will be applied to the PBQP graph in the
+/// order in which they are added.
+class PBQPRAConstraintList : public PBQPRAConstraint {
+public:
+  void apply(PBQPRAGraph &G) override {
+    for (auto &C : Constraints)
+      C->apply(G);
+  }
+
+  void addConstraint(std::unique_ptr<PBQPRAConstraint> C) {
+    if (C)
+      Constraints.push_back(std::move(C));
+  }
+
+private:
+  std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;
+
+  void anchor() override;
+};
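+
+// Example (illustrative sketch): composing constraints. `SpillCosts` and
+// `Interference` stand in for hypothetical PBQPRAConstraint subclasses.
+//
+// \code
+//   auto CL = llvm::make_unique<PBQPRAConstraintList>();
+//   CL->addConstraint(llvm::make_unique<SpillCosts>());
+//   CL->addConstraint(llvm::make_unique<Interference>());
+//   CL->apply(G); // SpillCosts is applied first, then Interference.
+// \endcode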
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PBQPRACONSTRAINT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
new file mode 100644
index 0000000..14ef0ec
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ParallelCG.h
@@ -0,0 +1,48 @@
+//===-- llvm/CodeGen/ParallelCG.h - Parallel code generation ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header declares functions that can be used for parallel code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PARALLELCG_H
+#define LLVM_CODEGEN_PARALLELCG_H
+
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <functional>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class Module;
+class TargetOptions;
+class raw_pwrite_stream;
+
+/// Split M into OSs.size() partitions, and generate code for each. Takes a
+/// factory function for the TargetMachine TMFactory. Writes OSs.size() output
+/// files to the output streams in OSs. The resulting output files, if linked
+/// together, are intended to be equivalent to the single output file that
+/// would have been code generated from M.
+///
+/// Writes bitcode for individual partitions into output streams in BCOSs, if
+/// BCOSs is not empty.
+///
+/// \returns M if OSs.size() == 1, otherwise returns std::unique_ptr<Module>().
+std::unique_ptr<Module>
+splitCodeGen(std::unique_ptr<Module> M, ArrayRef<raw_pwrite_stream *> OSs,
+             ArrayRef<llvm::raw_pwrite_stream *> BCOSs,
+             const std::function<std::unique_ptr<TargetMachine>()> &TMFactory,
+             TargetMachine::CodeGenFileType FT = TargetMachine::CGFT_ObjectFile,
+             bool PreserveLocals = false);
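+
+// Usage sketch (illustrative; stream setup, error handling and the
+// hypothetical createMyTargetMachine() helper are assumptions):
+//
+// \code
+//   SmallVector<raw_pwrite_stream *, 4> OSs; // one stream per partition
+//   auto TMFactory = []() -> std::unique_ptr<TargetMachine> {
+//     return createMyTargetMachine();
+//   };
+//   splitCodeGen(std::move(M), OSs, /*BCOSs=*/{}, TMFactory);
+// \endcode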
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/Passes.h b/linux-x64/clang/include/llvm/CodeGen/Passes.h
new file mode 100644
index 0000000..68fd04b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/Passes.h
@@ -0,0 +1,439 @@
+//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces to access the target independent code generation
+// passes provided by the LLVM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PASSES_H
+#define LLVM_CODEGEN_PASSES_H
+
+#include <functional>
+#include <string>
+
+namespace llvm {
+
+class FunctionPass;
+class MachineFunction;
+class MachineFunctionPass;
+class ModulePass;
+class Pass;
+class TargetMachine;
+class TargetRegisterClass;
+class raw_ostream;
+
+} // End llvm namespace
+
+/// List of target independent CodeGen pass IDs.
+namespace llvm {
+  FunctionPass *createAtomicExpandPass();
+
+  /// createUnreachableBlockEliminationPass - The LLVM code generator does not
+  /// work well with unreachable basic blocks (what live ranges make sense for a
+  /// block that cannot be reached?).  As such, a code generator should either
+  /// not instruction select unreachable blocks, or run this pass as its
+  /// last LLVM modifying pass to clean up blocks that are not reachable from
+  /// the entry block.
+  FunctionPass *createUnreachableBlockEliminationPass();
+
+  /// MachineFunctionPrinter pass - This pass prints out the machine function to
+  /// the given stream as a debugging tool.
+  MachineFunctionPass *
+  createMachineFunctionPrinterPass(raw_ostream &OS,
+                                   const std::string &Banner ="");
+
+  /// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
+  /// using the MIR serialization format.
+  MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+
+  /// This pass resets a MachineFunction when it has the FailedISel property
+  /// as if it was just created.
+  /// If EmitFallbackDiag is true, the pass will emit a
+  /// DiagnosticInfoISelFallback for every MachineFunction it resets.
+  /// If AbortOnFailedISel is true, abort compilation instead of resetting.
+  MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
+                                                      bool AbortOnFailedISel);
+
+  /// createCodeGenPreparePass - Transform the code to expose more pattern
+  /// matching during instruction selection.
+  FunctionPass *createCodeGenPreparePass();
+
+  /// createScalarizeMaskedMemIntrinPass - Replace masked load, store, gather
+  /// and scatter intrinsics with scalar code when target doesn't support them.
+  FunctionPass *createScalarizeMaskedMemIntrinPass();
+
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg
+  /// or load-linked/store-conditional loops.
+  extern char &AtomicExpandID;
+
+  /// MachineLoopInfo - This pass is a loop analysis pass.
+  extern char &MachineLoopInfoID;
+
+  /// MachineDominators - This pass is a machine dominators analysis pass.
+  extern char &MachineDominatorsID;
+
+  /// MachineDominanceFrontier - This pass is a machine dominance frontier
+  /// analysis pass.
+  extern char &MachineDominanceFrontierID;
+
+  /// MachineRegionInfo - This pass computes SESE regions for machine functions.
+  extern char &MachineRegionInfoPassID;
+
+  /// EdgeBundles analysis - Bundle machine CFG edges.
+  extern char &EdgeBundlesID;
+
+  /// LiveVariables pass - This pass computes the set of blocks in which each
+  /// variable is live and sets machine operand kill flags.
+  extern char &LiveVariablesID;
+
+  /// PHIElimination - This pass eliminates machine instruction PHI nodes
+  /// by inserting copy instructions.  This destroys SSA information, but is the
+  /// desired input for some register allocators.  This pass is "required" by
+  /// these register allocators like this (see the sketch below):
+  /// AU.addRequiredID(PHIEliminationID);
+  extern char &PHIEliminationID;
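+  //
+  // Sketch of the requesting pass (illustrative only):
+  //
+  //   void getAnalysisUsage(AnalysisUsage &AU) const override {
+  //     AU.addRequiredID(PHIEliminationID);
+  //     MachineFunctionPass::getAnalysisUsage(AU);
+  //   }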
+
+  /// LiveIntervals - This analysis keeps track of the live ranges of virtual
+  /// and physical registers.
+  extern char &LiveIntervalsID;
+
+  /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+  extern char &LiveStacksID;
+
+  /// TwoAddressInstruction - This pass reduces two-address instructions to
+  /// use two operands. This destroys SSA information but it is desired by
+  /// register allocators.
+  extern char &TwoAddressInstructionPassID;
+
+  /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
+  extern char &ProcessImplicitDefsID;
+
+  /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+  extern char &RegisterCoalescerID;
+
+  /// MachineScheduler - This pass schedules machine instructions.
+  extern char &MachineSchedulerID;
+
+  /// PostMachineScheduler - This pass schedules machine instructions postRA.
+  extern char &PostMachineSchedulerID;
+
+  /// SpillPlacement analysis. Suggest optimal placement of spill code between
+  /// basic blocks.
+  extern char &SpillPlacementID;
+
+  /// ShrinkWrap pass. Look for the best place to insert save and restore
+  /// instructions and update the MachineFunctionInfo with that information.
+  extern char &ShrinkWrapID;
+
+  /// LiveRangeShrink pass. Move instruction close to its definition to shrink
+  /// the definition's live range.
+  extern char &LiveRangeShrinkID;
+
+  /// Greedy register allocator.
+  extern char &RAGreedyID;
+
+  /// Basic register allocator.
+  extern char &RABasicID;
+
+  /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+  /// assigned in VirtRegMap.
+  extern char &VirtRegRewriterID;
+
+  /// UnreachableMachineBlockElimination - This pass removes unreachable
+  /// machine basic blocks.
+  extern char &UnreachableMachineBlockElimID;
+
+  /// DeadMachineInstructionElim - This pass removes dead machine instructions.
+  extern char &DeadMachineInstructionElimID;
+
+  /// This pass adds dead/undef flags after analyzing subregister lanes.
+  extern char &DetectDeadLanesID;
+
+  /// This pass performs post-RA machine sinking for COPY instructions.
+  extern char &PostRAMachineSinkingID;
+
+  /// FastRegisterAllocation Pass - This pass register allocates as fast as
+  /// possible. It is best suited for debug code where live ranges are short.
+  ///
+  FunctionPass *createFastRegisterAllocator();
+
+  /// BasicRegisterAllocation Pass - This pass implements a degenerate global
+  /// register allocator using the basic regalloc framework.
+  ///
+  FunctionPass *createBasicRegisterAllocator();
+
+  /// Greedy register allocation pass - This pass implements a global register
+  /// allocator for optimized builds.
+  ///
+  FunctionPass *createGreedyRegisterAllocator();
+
+  /// PBQPRegisterAllocation Pass - This pass implements the Partitioned
+  /// Boolean Quadratic Programming (PBQP) based register allocator.
+  ///
+  FunctionPass *createDefaultPBQPRegisterAllocator();
+
+  /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
+  /// and eliminates abstract frame references.
+  extern char &PrologEpilogCodeInserterID;
+  MachineFunctionPass *createPrologEpilogInserterPass();
+
+  /// ExpandPostRAPseudos - This pass expands pseudo instructions after
+  /// register allocation.
+  extern char &ExpandPostRAPseudosID;
+
+  /// createPostRAHazardRecognizer - This pass runs the post-ra hazard
+  /// recognizer.
+  extern char &PostRAHazardRecognizerID;
+
+  /// createPostRAScheduler - This pass performs post register allocation
+  /// scheduling.
+  extern char &PostRASchedulerID;
+
+  /// BranchFolding - This pass performs machine code CFG based
+  /// optimizations to delete branches to branches, eliminate branches to
+  /// successor blocks (creating fall throughs), and eliminate branches over
+  /// branches.
+  extern char &BranchFolderPassID;
+
+  /// BranchRelaxation - This pass replaces branches that need to jump further
+  /// than is supported by a branch instruction.
+  extern char &BranchRelaxationPassID;
+
+  /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+  extern char &MachineFunctionPrinterPassID;
+
+  /// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
+  /// serialization format.
+  extern char &MIRPrintingPassID;
+
+  /// TailDuplicate - Duplicate blocks with unconditional branches
+  /// into tails of their predecessors.
+  extern char &TailDuplicateID;
+
+  /// Duplicate blocks with unconditional branches into tails of their
+  /// predecessors. Variant that works before register allocation.
+  extern char &EarlyTailDuplicateID;
+
+  /// MachineTraceMetrics - This pass computes critical path and CPU resource
+  /// usage in an ensemble of traces.
+  extern char &MachineTraceMetricsID;
+
+  /// EarlyIfConverter - This pass performs if-conversion on SSA form by
+  /// inserting cmov instructions.
+  extern char &EarlyIfConverterID;
+
+  /// This pass performs instruction combining using trace metrics to estimate
+  /// critical-path and resource depth.
+  extern char &MachineCombinerID;
+
+  /// StackColoring - This pass performs stack coloring and merging.
+  /// It merges disjoint allocas to reduce the stack size.
+  extern char &StackColoringID;
+
+  /// IfConverter - This pass performs machine code if conversion.
+  extern char &IfConverterID;
+
+  FunctionPass *createIfConverter(
+      std::function<bool(const MachineFunction &)> Ftor);
+
+  /// MachineBlockPlacement - This pass places basic blocks based on branch
+  /// probabilities.
+  extern char &MachineBlockPlacementID;
+
+  /// MachineBlockPlacementStats - This pass collects statistics about the
+  /// basic block placement using branch probabilities and block frequency
+  /// information.
+  extern char &MachineBlockPlacementStatsID;
+
+  /// GCLowering Pass - Used by gc.root to perform its default lowering
+  /// operations.
+  FunctionPass *createGCLoweringPass();
+
+  /// ShadowStackGCLowering - Implements the custom lowering mechanism
+  /// used by the shadow stack GC.  Only runs on functions which opt in to
+  /// the shadow stack collector.
+  FunctionPass *createShadowStackGCLoweringPass();
+
+  /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+  /// in machine code. Must be added very late during code generation, just
+  /// prior to output, and importantly after all CFG transformations (such as
+  /// branch folding).
+  extern char &GCMachineCodeAnalysisID;
+
+  /// Creates a pass to print GC metadata.
+  ///
+  FunctionPass *createGCInfoPrinter(raw_ostream &OS);
+
+  /// MachineCSE - This pass performs global CSE on machine instructions.
+  extern char &MachineCSEID;
+
+  /// ImplicitNullChecks - This pass folds null pointer checks into nearby
+  /// memory operations.
+  extern char &ImplicitNullChecksID;
+
+  /// This pass performs loop invariant code motion on machine instructions.
+  extern char &MachineLICMID;
+
+  /// This pass performs loop invariant code motion on machine instructions.
+  /// This variant works before register allocation. \see MachineLICMID.
+  extern char &EarlyMachineLICMID;
+
+  /// MachineSinking - This pass performs sinking on machine instructions.
+  extern char &MachineSinkingID;
+
+  /// MachineCopyPropagation - This pass performs copy propagation on
+  /// machine instructions.
+  extern char &MachineCopyPropagationID;
+
+  /// PeepholeOptimizer - This pass performs peephole optimizations -
+  /// like extension and comparison eliminations.
+  extern char &PeepholeOptimizerID;
+
+  /// OptimizePHIs - This pass optimizes machine instruction PHIs
+  /// to take advantage of opportunities created during DAG legalization.
+  extern char &OptimizePHIsID;
+
+  /// StackSlotColoring - This pass performs stack slot coloring.
+  extern char &StackSlotColoringID;
+
+  /// \brief This pass lays out funclets contiguously.
+  extern char &FuncletLayoutID;
+
+  /// This pass inserts the XRay instrumentation sleds if they are supported by
+  /// the target platform.
+  extern char &XRayInstrumentationID;
+
+  /// This pass inserts FEntry calls.
+  extern char &FEntryInserterID;
+
+  /// \brief This pass implements the "patchable-function" attribute.
+  extern char &PatchableFunctionID;
+
+  /// createStackProtectorPass - This pass adds stack protectors to functions.
+  ///
+  FunctionPass *createStackProtectorPass();
+
+  /// createMachineVerifierPass - This pass verifies generated machine code
+  /// instructions for correctness.
+  ///
+  FunctionPass *createMachineVerifierPass(const std::string& Banner);
+
+  /// createDwarfEHPass - This pass mulches exception handling code into a form
+  /// adapted to code generation.  Required if using dwarf exception handling.
+  FunctionPass *createDwarfEHPass();
+
+  /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
+  /// in addition to the Itanium LSDA based personalities.
+  FunctionPass *createWinEHPass();
+
+  /// createSjLjEHPreparePass - This pass adapts exception handling code to
+  /// use the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control
+  /// flow.
+  ///
+  FunctionPass *createSjLjEHPreparePass();
+
+  /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+  /// slots relative to one another and allocates base registers to access them
+  /// when it is estimated by the target to be out of range of normal frame
+  /// pointer or stack pointer index addressing.
+  extern char &LocalStackSlotAllocationID;
+
+  /// ExpandISelPseudos - This pass expands pseudo-instructions.
+  extern char &ExpandISelPseudosID;
+
+  /// UnpackMachineBundles - This pass unpacks machine instruction bundles.
+  extern char &UnpackMachineBundlesID;
+
+  FunctionPass *
+  createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
+
+  /// FinalizeMachineBundles - This pass finalizes machine instruction
+  /// bundles (created earlier, e.g. during pre-RA scheduling).
+  extern char &FinalizeMachineBundlesID;
+
+  /// StackMapLiveness - This pass analyses the register live-out set of
+  /// stackmap/patchpoint intrinsics and attaches the calculated information to
+  /// the intrinsic for later emission to the StackMap.
+  extern char &StackMapLivenessID;
+
+  /// LiveDebugValues pass
+  extern char &LiveDebugValuesID;
+
+  /// createJumpInstrTables - This pass creates jump-instruction tables.
+  ModulePass *createJumpInstrTablesPass();
+
+  /// createForwardControlFlowIntegrityPass - This pass adds control-flow
+  /// integrity.
+  ModulePass *createForwardControlFlowIntegrityPass();
+
+  /// InterleavedAccess Pass - This pass identifies and matches interleaved
+  /// memory accesses to target specific intrinsics.
+  ///
+  FunctionPass *createInterleavedAccessPass();
+
+  /// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
+  /// TLS variables for the emulated TLS model.
+  ///
+  ModulePass *createLowerEmuTLSPass();
+
+  /// This pass lowers the @llvm.load.relative intrinsic to instructions.
+  /// This is unsafe to do earlier because a pass may combine the constant
+  /// initializer into the load, which may result in an overflowing evaluation.
+  ModulePass *createPreISelIntrinsicLoweringPass();
+
+  /// GlobalMerge - This pass merges internal (by default) globals into structs
+  /// to enable reuse of a base pointer by indexed addressing modes.
+  /// It can also be configured to focus on size optimizations only.
+  ///
+  Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+                              bool OnlyOptimizeForSize = false,
+                              bool MergeExternalByDefault = false);
+
+  /// This pass splits the stack into a safe stack and an unsafe stack to
+  /// protect against stack-based overflow vulnerabilities.
+  FunctionPass *createSafeStackPass();
+
+  /// This pass detects subregister lanes in a virtual register that are used
+  /// independently of other lanes and splits them into separate virtual
+  /// registers.
+  extern char &RenameIndependentSubregsID;
+
+  /// This pass is executed POST-RA to collect which physical registers are
+  /// preserved by given machine function.
+  FunctionPass *createRegUsageInfoCollector();
+
+  /// Return a MachineFunction pass that identifies call sites and propagates
+  /// the register usage information of the callee to the caller, if it is
+  /// available via the PhysicalRegisterUsageInfo pass.
+  FunctionPass *createRegUsageInfoPropPass();
+
+  /// This pass performs software pipelining on machine instructions.
+  extern char &MachinePipelinerID;
+
+  /// This pass frees the memory occupied by the MachineFunction.
+  FunctionPass *createFreeMachineFunctionPass();
+
+  /// This pass performs outlining on machine instructions directly before
+  /// printing assembly.
+  ModulePass *createMachineOutlinerPass(bool OutlineFromLinkOnceODRs = false);
+
+  /// This pass expands the experimental reduction intrinsics into sequences of
+  /// shuffles.
+  FunctionPass *createExpandReductionsPass();
+
+  // This pass expands memcmp() to load/stores.
+  FunctionPass *createExpandMemCmpPass();
+
+  /// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
+  FunctionPass *createBreakFalseDeps();
+
+  // This pass expands indirectbr instructions.
+  FunctionPass *createIndirectBrExpandPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h b/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h
new file mode 100644
index 0000000..7a007eb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PreISelIntrinsicLowering.h
@@ -0,0 +1,29 @@
+//===- PreISelIntrinsicLowering.h - Pre-ISel intrinsic lowering pass ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements IR lowering for the llvm.load.relative intrinsic.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
+#define LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+struct PreISelIntrinsicLoweringPass
+    : PassInfoMixin<PreISelIntrinsicLoweringPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
new file mode 100644
index 0000000..bdf0bb7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/PseudoSourceValue.h
@@ -0,0 +1,198 @@
+//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PseudoSourceValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueMap.h"
+#include <map>
+
+namespace llvm {
+
+class MachineFrameInfo;
+class MachineMemOperand;
+class raw_ostream;
+class TargetInstrInfo;
+
+raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MMO);
+class PseudoSourceValue;
+raw_ostream &operator<<(raw_ostream &OS, const PseudoSourceValue* PSV);
+
+/// Special value supplied for machine level alias analysis. It indicates that
+/// a memory access references the function's stack frame (e.g., a spill
+/// slot), the area below the stack frame (e.g., argument space), or the
+/// constant pool.
+class PseudoSourceValue {
+public:
+  enum PSVKind {
+    Stack,
+    GOT,
+    JumpTable,
+    ConstantPool,
+    FixedStack,
+    GlobalValueCallEntry,
+    ExternalSymbolCallEntry,
+    TargetCustom
+  };
+
+private:
+  PSVKind Kind;
+  unsigned AddressSpace;
+  friend raw_ostream &llvm::operator<<(raw_ostream &OS,
+                                       const PseudoSourceValue* PSV);
+
+  friend class MachineMemOperand; // For printCustom().
+
+  /// Implement printing for PseudoSourceValue. This is called from
+  /// Value::print or Value's operator<<.
+  virtual void printCustom(raw_ostream &O) const;
+
+public:
+  explicit PseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+
+  virtual ~PseudoSourceValue();
+
+  PSVKind kind() const { return Kind; }
+
+  bool isStack() const { return Kind == Stack; }
+  bool isGOT() const { return Kind == GOT; }
+  bool isConstantPool() const { return Kind == ConstantPool; }
+  bool isJumpTable() const { return Kind == JumpTable; }
+
+  unsigned getAddressSpace() const { return AddressSpace; }
+
+  unsigned getTargetCustom() const {
+    return (Kind >= TargetCustom) ? ((Kind+1) - TargetCustom) : 0;
+  }
+
+  /// Test whether the memory pointed to by this PseudoSourceValue has a
+  /// constant value.
+  virtual bool isConstant(const MachineFrameInfo *) const;
+
+  /// Test whether the memory pointed to by this PseudoSourceValue may also be
+  /// pointed to by an LLVM IR Value.
+  virtual bool isAliased(const MachineFrameInfo *) const;
+
+  /// Return true if the memory pointed to by this PseudoSourceValue can ever
+  /// alias an LLVM IR Value.
+  virtual bool mayAlias(const MachineFrameInfo *) const;
+};
+
+/// A specialized PseudoSourceValue for holding FixedStack values, which must
+/// include a frame index.
+class FixedStackPseudoSourceValue : public PseudoSourceValue {
+  const int FI;
+
+public:
+  explicit FixedStackPseudoSourceValue(int FI, const TargetInstrInfo &TII)
+      : PseudoSourceValue(FixedStack, TII), FI(FI) {}
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == FixedStack;
+  }
+
+  bool isConstant(const MachineFrameInfo *MFI) const override;
+
+  bool isAliased(const MachineFrameInfo *MFI) const override;
+
+  bool mayAlias(const MachineFrameInfo *) const override;
+
+  void printCustom(raw_ostream &OS) const override;
+
+  int getFrameIndex() const { return FI; }
+};
+
+class CallEntryPseudoSourceValue : public PseudoSourceValue {
+protected:
+  CallEntryPseudoSourceValue(PSVKind Kind, const TargetInstrInfo &TII);
+
+public:
+  bool isConstant(const MachineFrameInfo *) const override;
+  bool isAliased(const MachineFrameInfo *) const override;
+  bool mayAlias(const MachineFrameInfo *) const override;
+};
+
+/// A specialized pseudo source value for holding GlobalValue values.
+class GlobalValuePseudoSourceValue : public CallEntryPseudoSourceValue {
+  const GlobalValue *GV;
+
+public:
+  GlobalValuePseudoSourceValue(const GlobalValue *GV,
+                               const TargetInstrInfo &TII);
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == GlobalValueCallEntry;
+  }
+
+  const GlobalValue *getValue() const { return GV; }
+};
+
+/// A specialized pseudo source value for holding external symbol values.
+class ExternalSymbolPseudoSourceValue : public CallEntryPseudoSourceValue {
+  const char *ES;
+
+public:
+  ExternalSymbolPseudoSourceValue(const char *ES, const TargetInstrInfo &TII);
+
+  static bool classof(const PseudoSourceValue *V) {
+    return V->kind() == ExternalSymbolCallEntry;
+  }
+
+  const char *getSymbol() const { return ES; }
+};
+
+/// Manages creation of pseudo source values.
+class PseudoSourceValueManager {
+  const TargetInstrInfo &TII;
+  const PseudoSourceValue StackPSV, GOTPSV, JumpTablePSV, ConstantPoolPSV;
+  std::map<int, std::unique_ptr<FixedStackPseudoSourceValue>> FSValues;
+  StringMap<std::unique_ptr<const ExternalSymbolPseudoSourceValue>>
+      ExternalCallEntries;
+  ValueMap<const GlobalValue *,
+           std::unique_ptr<const GlobalValuePseudoSourceValue>>
+      GlobalCallEntries;
+
+public:
+  PseudoSourceValueManager(const TargetInstrInfo &TII);
+
+  /// Return a pseudo source value referencing the area below the stack frame of
+  /// a function, e.g., the argument space.
+  const PseudoSourceValue *getStack();
+
+  /// Return a pseudo source value referencing the global offset table
+  /// (or something like it).
+  const PseudoSourceValue *getGOT();
+
+  /// Return a pseudo source value referencing the constant pool. Since constant
+  /// pools are constant, this doesn't need to identify a specific constant
+  /// pool entry.
+  const PseudoSourceValue *getConstantPool();
+
+  /// Return a pseudo source value referencing a jump table. Since jump tables
+  /// are constant, this doesn't need to identify a specific jump table.
+  const PseudoSourceValue *getJumpTable();
+
+  /// Return a pseudo source value referencing a fixed stack frame entry,
+  /// e.g., a spill slot.
+  const PseudoSourceValue *getFixedStack(int FI);
+
+  const PseudoSourceValue *getGlobalValueCallEntry(const GlobalValue *GV);
+
+  const PseudoSourceValue *getExternalSymbolCallEntry(const char *ES);
+};
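+
+// Example (illustrative): backends reach these through the manager owned by
+// the MachineFunction; `MF` and `FrameIdx` are assumed in scope.
+//
+// \code
+//   PseudoSourceValueManager &PSVM = MF.getPSVManager();
+//   const PseudoSourceValue *PSV = PSVM.getFixedStack(FrameIdx);
+//   if (const auto *FS = dyn_cast<FixedStackPseudoSourceValue>(PSV))
+//     (void)FS->getFrameIndex(); // == FrameIdx
+// \endcode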
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h b/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h
new file mode 100644
index 0000000..b21b745
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ReachingDefAnalysis.h
@@ -0,0 +1,118 @@
+//==--- llvm/CodeGen/ReachingDefAnalysis.h - Reaching Def Analysis -*- C++ -*---==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Reaching Defs Analysis pass.
+///
+/// This pass tracks for each instruction what is the "closest" reaching def of
+/// a given register. It is used by BreakFalseDeps (for clearance calculation)
+/// and ExecutionDomainFix (for arbitrating conflicting domains).
+///
+/// Note that this is different from the usual notion of liveness. The CPU
+/// doesn't care whether or not we consider a register killed.
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
+#define LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LoopTraversal.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+
+/// This class provides the reaching def analysis.
+class ReachingDefAnalysis : public MachineFunctionPass {
+private:
+  MachineFunction *MF;
+  const TargetRegisterInfo *TRI;
+  unsigned NumRegUnits;
+  /// Instruction that defined each register, relative to the beginning of the
+  /// current basic block.  When a LiveRegsDefInfo is used to represent a
+  /// live-out register, this value is relative to the end of the basic block,
+  /// so it will be a negative number.
+  using LiveRegsDefInfo = std::vector<int>;
+  LiveRegsDefInfo LiveRegs;
+
+  /// Keeps clearance information for all registers. Note that this is
+  /// different from the usual notion of liveness. The CPU doesn't care
+  /// whether or not we consider a register killed.
+  using OutRegsInfoMap = SmallVector<LiveRegsDefInfo, 4>;
+  OutRegsInfoMap MBBOutRegsInfos;
+
+  /// Current instruction number.
+  /// The first instruction in each basic block is 0.
+  int CurInstr;
+
+  /// Maps instructions to their instruction Ids, relative to the beginning
+  /// of their basic blocks.
+  DenseMap<MachineInstr *, int> InstIds;
+
+  /// All reaching defs of a given RegUnit for a given MBB.
+  using MBBRegUnitDefs = SmallVector<int, 1>;
+  /// All reaching defs of all reg units for a given MBB.
+  using MBBDefsInfo = std::vector<MBBRegUnitDefs>;
+  /// All reaching defs of all reg units for all MBBs.
+  using MBBReachingDefsInfo = SmallVector<MBBDefsInfo, 4>;
+  MBBReachingDefsInfo MBBReachingDefs;
+
+  /// Default values are 'nothing happened a long time ago'.
+  const int ReachingDefDefaultVal = -(1 << 20);
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  ReachingDefAnalysis() : MachineFunctionPass(ID) {
+    initializeReachingDefAnalysisPass(*PassRegistry::getPassRegistry());
+  }
+  void releaseMemory() override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+  /// Provides the instruction id of the closest reaching def instruction of
+  /// PhysReg that reaches MI, relative to the beginning of MI's basic block.
+  int getReachingDef(MachineInstr *MI, int PhysReg);
+
+  /// Provides the clearance - the number of instructions since the closest
+  /// reaching def instruction of PhysReg that reaches MI.
+  int getClearance(MachineInstr *MI, MCPhysReg PhysReg);
+
+private:
+  /// Set up LiveRegs by merging predecessor live-out values.
+  void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update live-out values.
+  void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Process the given basic block.
+  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);
+
+  /// Update def-ages for registers defined by MI.
+  /// Also break dependencies on partial defs and undef uses.
+  void processDefs(MachineInstr *);
+};
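+
+// Example (illustrative): a client pass can query clearance to decide whether
+// breaking a false dependency is worthwhile. `MI`, `PhysReg`, and the
+// threshold are assumptions.
+//
+// \code
+//   auto &RDA = getAnalysis<ReachingDefAnalysis>();
+//   if (RDA.getClearance(MI, PhysReg) < Threshold)
+//     ; // consider breaking the dependency here.
+// \endcode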
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_REACHINGDEFSANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
new file mode 100644
index 0000000..5b34286
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegAllocPBQP.h
@@ -0,0 +1,536 @@
+//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPBuilder interface, for classes which build PBQP
+// instances to represent register allocation problems, and the RegAllocPBQP
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
+#define LLVM_CODEGEN_REGALLOCPBQP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/CodeGen/PBQP/CostAllocator.h"
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Math.h"
+#include "llvm/CodeGen/PBQP/ReductionRules.h"
+#include "llvm/CodeGen/PBQP/Solution.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class FunctionPass;
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineFunction;
+class raw_ostream;
+
+namespace PBQP {
+namespace RegAlloc {
+
+/// @brief Spill option index.
+inline unsigned getSpillOptionIdx() { return 0; }
+
+/// \brief Metadata to speed allocatability test.
+///
+/// Keeps track of the number of infinities in each row and column.
+class MatrixMetadata {
+public:
+  MatrixMetadata(const Matrix& M)
+    : UnsafeRows(new bool[M.getRows() - 1]()),
+      UnsafeCols(new bool[M.getCols() - 1]()) {
+    unsigned* ColCounts = new unsigned[M.getCols() - 1]();
+
+    for (unsigned i = 1; i < M.getRows(); ++i) {
+      unsigned RowCount = 0;
+      for (unsigned j = 1; j < M.getCols(); ++j) {
+        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
+          ++RowCount;
+          ++ColCounts[j - 1];
+          UnsafeRows[i - 1] = true;
+          UnsafeCols[j - 1] = true;
+        }
+      }
+      WorstRow = std::max(WorstRow, RowCount);
+    }
+    unsigned WorstColCountForCurRow =
+      *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
+    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
+    delete[] ColCounts;
+  }
+
+  MatrixMetadata(const MatrixMetadata &) = delete;
+  MatrixMetadata &operator=(const MatrixMetadata &) = delete;
+
+  unsigned getWorstRow() const { return WorstRow; }
+  unsigned getWorstCol() const { return WorstCol; }
+  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
+  const bool* getUnsafeCols() const { return UnsafeCols.get(); }
+
+private:
+  unsigned WorstRow = 0;
+  unsigned WorstCol = 0;
+  std::unique_ptr<bool[]> UnsafeRows;
+  std::unique_ptr<bool[]> UnsafeCols;
+};
+
+/// \brief Holds a vector of the allowed physical regs for a vreg.
+class AllowedRegVector {
+  friend hash_code hash_value(const AllowedRegVector &);
+
+public:
+  AllowedRegVector() = default;
+  AllowedRegVector(AllowedRegVector &&) = default;
+
+  AllowedRegVector(const std::vector<unsigned> &OptVec)
+    : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
+    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
+  }
+
+  unsigned size() const { return NumOpts; }
+  unsigned operator[](size_t I) const { return Opts[I]; }
+
+  bool operator==(const AllowedRegVector &Other) const {
+    if (NumOpts != Other.NumOpts)
+      return false;
+    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
+  }
+
+  bool operator!=(const AllowedRegVector &Other) const {
+    return !(*this == Other);
+  }
+
+private:
+  unsigned NumOpts = 0;
+  std::unique_ptr<unsigned[]> Opts;
+};
+
+inline hash_code hash_value(const AllowedRegVector &OptRegs) {
+  unsigned *OStart = OptRegs.Opts.get();
+  unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
+  return hash_combine(OptRegs.NumOpts,
+                      hash_combine_range(OStart, OEnd));
+}
+
+/// \brief Holds graph-level metadata relevant to PBQP RA problems.
+class GraphMetadata {
+private:
+  using AllowedRegVecPool = ValuePool<AllowedRegVector>;
+
+public:
+  using AllowedRegVecRef = AllowedRegVecPool::PoolRef;
+
+  GraphMetadata(MachineFunction &MF,
+                LiveIntervals &LIS,
+                MachineBlockFrequencyInfo &MBFI)
+    : MF(MF), LIS(LIS), MBFI(MBFI) {}
+
+  MachineFunction &MF;
+  LiveIntervals &LIS;
+  MachineBlockFrequencyInfo &MBFI;
+
+  void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
+    VRegToNodeId[VReg] = NId;
+  }
+
+  GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
+    auto VRegItr = VRegToNodeId.find(VReg);
+    if (VRegItr == VRegToNodeId.end())
+      return GraphBase::invalidNodeId();
+    return VRegItr->second;
+  }
+
+  AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
+    return AllowedRegVecs.getValue(std::move(Allowed));
+  }
+
+private:
+  DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
+  AllowedRegVecPool AllowedRegVecs;
+};
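+
+// Example (illustrative): the allocator records the vreg-to-node mapping as
+// it builds the graph; `G`, `VReg` and `NId` are assumed in scope.
+//
+// \code
+//   G.getMetadata().setNodeIdForVReg(VReg, NId);
+//   assert(G.getMetadata().getNodeIdForVReg(VReg) == NId);
+// \endcode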
+
+/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
+class NodeMetadata {
+public:
+  using AllowedRegVector = RegAlloc::AllowedRegVector;
+
+  // The node's reduction state. The order in this enum is important,
+  // as it is assumed nodes can only progress up (i.e. towards being
+  // optimally reducible) when reducing the graph.
+  using ReductionState = enum {
+    Unprocessed,
+    NotProvablyAllocatable,
+    ConservativelyAllocatable,
+    OptimallyReducible
+  };
+
+  NodeMetadata() = default;
+
+  NodeMetadata(const NodeMetadata &Other)
+    : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
+      OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
+      AllowedRegs(Other.AllowedRegs)
+#ifndef NDEBUG
+      , everConservativelyAllocatable(Other.everConservativelyAllocatable)
+#endif
+  {
+    if (NumOpts > 0) {
+      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
+                &OptUnsafeEdges[0]);
+    }
+  }
+
+  NodeMetadata(NodeMetadata &&) = default;
+  NodeMetadata& operator=(NodeMetadata &&) = default;
+
+  void setVReg(unsigned VReg) { this->VReg = VReg; }
+  unsigned getVReg() const { return VReg; }
+
+  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
+    this->AllowedRegs = std::move(AllowedRegs);
+  }
+  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
+
+  void setup(const Vector& Costs) {
+    NumOpts = Costs.getLength() - 1;
+    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
+  }
+
+  ReductionState getReductionState() const { return RS; }
+  void setReductionState(ReductionState RS) {
+    assert(RS >= this->RS && "A node's reduction state cannot be downgraded");
+    this->RS = RS;
+
+#ifndef NDEBUG
+    // Remember this state to assert later that a non-infinite register
+    // option was available.
+    if (RS == ConservativelyAllocatable)
+      everConservativelyAllocatable = true;
+#endif
+  }
+
+  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
+    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
+    const bool* UnsafeOpts =
+      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+    for (unsigned i = 0; i < NumOpts; ++i)
+      OptUnsafeEdges[i] += UnsafeOpts[i];
+  }
+
+  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
+    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
+    const bool* UnsafeOpts =
+      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+    for (unsigned i = 0; i < NumOpts; ++i)
+      OptUnsafeEdges[i] -= UnsafeOpts[i];
+  }
+
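+  // A node is conservatively allocatable if it has more (non-spill) options
+  // than denied options, or if at least one option has no unsafe
+  // (infinity-bearing) incident edges: that option can be chosen regardless
+  // of how the node's neighbors are assigned.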
+  bool isConservativelyAllocatable() const {
+    return (DeniedOpts < NumOpts) ||
+      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
+       &OptUnsafeEdges[NumOpts]);
+  }
+
+#ifndef NDEBUG
+  bool wasConservativelyAllocatable() const {
+    return everConservativelyAllocatable;
+  }
+#endif
+
+private:
+  ReductionState RS = Unprocessed;
+  unsigned NumOpts = 0;
+  unsigned DeniedOpts = 0;
+  std::unique_ptr<unsigned[]> OptUnsafeEdges;
+  unsigned VReg = 0;
+  GraphMetadata::AllowedRegVecRef AllowedRegs;
+
+#ifndef NDEBUG
+  bool everConservativelyAllocatable = false;
+#endif
+};
+
+class RegAllocSolverImpl {
+private:
+  using RAMatrix = MDMatrix<MatrixMetadata>;
+
+public:
+  using RawVector = PBQP::Vector;
+  using RawMatrix = PBQP::Matrix;
+  using Vector = PBQP::Vector;
+  using Matrix = RAMatrix;
+  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;
+
+  using NodeId = GraphBase::NodeId;
+  using EdgeId = GraphBase::EdgeId;
+
+  using NodeMetadata = RegAlloc::NodeMetadata;
+  struct EdgeMetadata {};
+  using GraphMetadata = RegAlloc::GraphMetadata;
+
+  using Graph = PBQP::Graph<RegAllocSolverImpl>;
+
+  RegAllocSolverImpl(Graph &G) : G(G) {}
+
+  Solution solve() {
+    G.setSolver(*this);
+    Solution S;
+    setup();
+    S = backpropagate(G, reduce());
+    G.unsetSolver();
+    return S;
+  }
+
+  void handleAddNode(NodeId NId) {
+    assert(G.getNodeCosts(NId).getLength() > 1 &&
+           "PBQP Graph should not contain single or zero-option nodes");
+    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
+  }
+
+  void handleRemoveNode(NodeId NId) {}
+  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
+
+  void handleAddEdge(EdgeId EId) {
+    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
+    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
+  }
+
+  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
+    NodeMetadata& NMd = G.getNodeMetadata(NId);
+    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
+    promote(NId, NMd);
+  }
+
+  void handleReconnectEdge(EdgeId EId, NodeId NId) {
+    NodeMetadata& NMd = G.getNodeMetadata(NId);
+    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
+  }
+
+  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
+    NodeId N1Id = G.getEdgeNode1Id(EId);
+    NodeId N2Id = G.getEdgeNode2Id(EId);
+    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
+    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
+    bool Transpose = N1Id != G.getEdgeNode1Id(EId);
+
+    // Metadata are computed incrementally. First, update them
+    // by removing the old cost.
+    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
+    N1Md.handleRemoveEdge(OldMMd, Transpose);
+    N2Md.handleRemoveEdge(OldMMd, !Transpose);
+
+    // And update now the metadata with the new cost.
+    const MatrixMetadata& MMd = NewCosts.getMetadata();
+    N1Md.handleAddEdge(MMd, Transpose);
+    N2Md.handleAddEdge(MMd, !Transpose);
+
+    // As the metadata may have changed with the update, the nodes may have
+    // become ConservativelyAllocatable or OptimallyReducible.
+    promote(N1Id, N1Md);
+    promote(N2Id, N2Md);
+  }
+
+private:
+  void promote(NodeId NId, NodeMetadata& NMd) {
+    if (G.getNodeDegree(NId) == 3) {
+      // This node is becoming optimally reducible.
+      moveToOptimallyReducibleNodes(NId);
+    } else if (NMd.getReductionState() ==
+               NodeMetadata::NotProvablyAllocatable &&
+               NMd.isConservativelyAllocatable()) {
+      // This node just became conservatively allocatable.
+      moveToConservativelyAllocatableNodes(NId);
+    }
+  }
+
+  void removeFromCurrentSet(NodeId NId) {
+    switch (G.getNodeMetadata(NId).getReductionState()) {
+    case NodeMetadata::Unprocessed: break;
+    case NodeMetadata::OptimallyReducible:
+      assert(OptimallyReducibleNodes.find(NId) !=
+             OptimallyReducibleNodes.end() &&
+             "Node not in optimally reducible set.");
+      OptimallyReducibleNodes.erase(NId);
+      break;
+    case NodeMetadata::ConservativelyAllocatable:
+      assert(ConservativelyAllocatableNodes.find(NId) !=
+             ConservativelyAllocatableNodes.end() &&
+             "Node not in conservatively allocatable set.");
+      ConservativelyAllocatableNodes.erase(NId);
+      break;
+    case NodeMetadata::NotProvablyAllocatable:
+      assert(NotProvablyAllocatableNodes.find(NId) !=
+             NotProvablyAllocatableNodes.end() &&
+             "Node not in not-provably-allocatable set.");
+      NotProvablyAllocatableNodes.erase(NId);
+      break;
+    }
+  }
+
+  void moveToOptimallyReducibleNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    OptimallyReducibleNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::OptimallyReducible);
+  }
+
+  void moveToConservativelyAllocatableNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    ConservativelyAllocatableNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::ConservativelyAllocatable);
+  }
+
+  void moveToNotProvablyAllocatableNodes(NodeId NId) {
+    removeFromCurrentSet(NId);
+    NotProvablyAllocatableNodes.insert(NId);
+    G.getNodeMetadata(NId).setReductionState(
+      NodeMetadata::NotProvablyAllocatable);
+  }
+
+  void setup() {
+    // Set up worklists.
+    for (auto NId : G.nodeIds()) {
+      if (G.getNodeDegree(NId) < 3)
+        moveToOptimallyReducibleNodes(NId);
+      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
+        moveToConservativelyAllocatableNodes(NId);
+      else
+        moveToNotProvablyAllocatableNodes(NId);
+    }
+  }
+
+  // Compute a reduction order for the graph by iteratively applying PBQP
+  // reduction rules. Locally optimal rules are applied whenever possible (R0,
+  // R1, R2). If no locally-optimal rules apply then any conservatively
+  // allocatable node is reduced. Finally, if no conservatively allocatable
+  // node exists then the node with the lowest spill-cost:degree ratio is
+  // selected.
+  std::vector<GraphBase::NodeId> reduce() {
+    assert(!G.empty() && "Cannot reduce empty graph.");
+
+    using NodeId = GraphBase::NodeId;
+    std::vector<NodeId> NodeStack;
+
+    // Consume worklists.
+    while (true) {
+      if (!OptimallyReducibleNodes.empty()) {
+        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
+        NodeId NId = *NItr;
+        OptimallyReducibleNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        switch (G.getNodeDegree(NId)) {
+        case 0:
+          break;
+        case 1:
+          applyR1(G, NId);
+          break;
+        case 2:
+          applyR2(G, NId);
+          break;
+        default: llvm_unreachable("Not an optimally reducible node.");
+        }
+      } else if (!ConservativelyAllocatableNodes.empty()) {
+        // Conservatively allocatable nodes will never spill. For now just
+        // take the first node in the set and push it on the stack. When we
+        // start optimizing more heavily for register preferences, it may
+        // be better to push nodes with lower 'expected' or worst-case
+        // register costs first (since early nodes are the most
+        // constrained).
+        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
+        NodeId NId = *NItr;
+        ConservativelyAllocatableNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        G.disconnectAllNeighborsFromNode(NId);
+      } else if (!NotProvablyAllocatableNodes.empty()) {
+        NodeSet::iterator NItr =
+          std::min_element(NotProvablyAllocatableNodes.begin(),
+                           NotProvablyAllocatableNodes.end(),
+                           SpillCostComparator(G));
+        NodeId NId = *NItr;
+        NotProvablyAllocatableNodes.erase(NItr);
+        NodeStack.push_back(NId);
+        G.disconnectAllNeighborsFromNode(NId);
+      } else
+        break;
+    }
+
+    return NodeStack;
+  }
+
+  class SpillCostComparator {
+  public:
+    SpillCostComparator(const Graph& G) : G(G) {}
+
+    bool operator()(NodeId N1Id, NodeId N2Id) {
+      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
+      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
+      if (N1SC == N2SC)
+        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
+      return N1SC < N2SC;
+    }
+
+  private:
+    const Graph& G;
+  };
+
+  Graph& G;
+  using NodeSet = std::set<NodeId>;
+  NodeSet OptimallyReducibleNodes;
+  NodeSet ConservativelyAllocatableNodes;
+  NodeSet NotProvablyAllocatableNodes;
+};
+
+class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
+private:
+  using BaseT = PBQP::Graph<RegAllocSolverImpl>;
+
+public:
+  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
+
+  /// @brief Dump this graph to dbgs().
+  void dump() const;
+
+  /// @brief Dump this graph to an output stream.
+  /// @param OS Output stream to print on.
+  void dump(raw_ostream &OS) const;
+
+  /// @brief Print a representation of this graph in DOT format.
+  /// @param OS Output stream to print on.
+  void printDot(raw_ostream &OS) const;
+};
+
+inline Solution solve(PBQPRAGraph& G) {
+  if (G.empty())
+    return Solution();
+  RegAllocSolverImpl RegAllocSolver(G);
+  return RegAllocSolver.solve();
+}
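+
+// Example (illustrative): solving a populated graph. Construction of nodes
+// and interference edges is elided and assumed done by the PBQP allocator.
+//
+// \code
+//   PBQPRAGraph G(GraphMetadata(MF, LIS, MBFI));
+//   // ... add a node per vreg and an edge per interference ...
+//   Solution S = solve(G);
+// \endcode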
+
+} // end namespace RegAlloc
+} // end namespace PBQP
+
+/// @brief Create a PBQP register allocator instance.
+FunctionPass *
+createPBQPRegisterAllocator(char *customPassID = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGALLOCPBQP_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h b/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h
new file mode 100644
index 0000000..481747d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegAllocRegistry.h
@@ -0,0 +1,66 @@
+//===- llvm/CodeGen/RegAllocRegistry.h --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation for register allocator function
+// pass registry (RegisterRegAlloc).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
+#define LLVM_CODEGEN_REGALLOCREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+class FunctionPass;
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterRegAlloc class - Track the registration of register allocators.
+///
+//===----------------------------------------------------------------------===//
+class RegisterRegAlloc : public MachinePassRegistryNode {
+public:
+  using FunctionPassCtor = FunctionPass *(*)();
+
+  static MachinePassRegistry Registry;
+
+  RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
+      : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+    Registry.Add(this);
+  }
+
+  ~RegisterRegAlloc() { Registry.Remove(this); }
+
+  // Accessors.
+  RegisterRegAlloc *getNext() const {
+    return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
+  }
+
+  static RegisterRegAlloc *getList() {
+    return (RegisterRegAlloc *)Registry.getList();
+  }
+
+  static FunctionPassCtor getDefault() {
+    return (FunctionPassCtor)Registry.getDefault();
+  }
+
+  static void setDefault(FunctionPassCtor C) {
+    Registry.setDefault((MachinePassCtor)C);
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
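+
+// Illustrative sketch: a register allocator typically registers itself with a
+// static RegisterRegAlloc instance so it can be selected via -regalloc=<name>.
+// The name "myalloc" and the factory createMyRegAlloc are hypothetical:
+//
+//   static RegisterRegAlloc
+//     MyRegAlloc("myalloc", "my example register allocator",
+//                createMyRegAlloc);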
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGALLOCREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h b/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h
new file mode 100644
index 0000000..97113c5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterClassInfo.h
@@ -0,0 +1,150 @@
+//===- RegisterClassInfo.h - Dynamic Register Class Info --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RegisterClassInfo class which provides dynamic
+// information about target register classes. Callee saved and reserved
+// registers depends on calling conventions and other dynamic information, so
+// some things cannot be determined statically.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
+#define LLVM_CODEGEN_REGISTERCLASSINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class RegisterClassInfo {
+  struct RCInfo {
+    unsigned Tag = 0;
+    unsigned NumRegs = 0;
+    bool ProperSubClass = false;
+    uint8_t MinCost = 0;
+    uint16_t LastCostChange = 0;
+    std::unique_ptr<MCPhysReg[]> Order;
+
+    RCInfo() = default;
+
+    operator ArrayRef<MCPhysReg>() const {
+      return makeArrayRef(Order.get(), NumRegs);
+    }
+  };
+
+  // Brief cached information for each register class.
+  std::unique_ptr<RCInfo[]> RegClass;
+
+  // Tag changes whenever cached information needs to be recomputed. An RCInfo
+  // entry is valid when its tag matches.
+  unsigned Tag = 0;
+
+  const MachineFunction *MF = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+
+  // Callee saved registers of last MF. Assumed to be valid until the next
+  // runOnMachineFunction() call.
+  // Used only to determine if an update was made to CalleeSavedAliases.
+  const MCPhysReg *CalleeSavedRegs = nullptr;
+
+  // Map a register alias to the callee saved register.
+  SmallVector<MCPhysReg, 4> CalleeSavedAliases;
+
+  // Reserved registers in the current MF.
+  BitVector Reserved;
+
+  std::unique_ptr<unsigned[]> PSetLimits;
+
+  // Compute all information about RC.
+  void compute(const TargetRegisterClass *RC) const;
+
+  // Return an up-to-date RCInfo for RC.
+  const RCInfo &get(const TargetRegisterClass *RC) const {
+    const RCInfo &RCI = RegClass[RC->getID()];
+    if (Tag != RCI.Tag)
+      compute(RC);
+    return RCI;
+  }
+
+public:
+  RegisterClassInfo();
+
+  /// runOnMachineFunction - Prepare to answer questions about MF. This must
+  /// be called before any other methods are used.
+  void runOnMachineFunction(const MachineFunction &MF);
+
+  /// getNumAllocatableRegs - Returns the number of actually allocatable
+  /// registers in RC in the current function.
+  unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
+    return get(RC).NumRegs;
+  }
+
+  /// getOrder - Returns the preferred allocation order for RC. The order
+  /// contains no reserved registers, and registers that alias callee saved
+  /// registers come last.
+  ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
+    return get(RC);
+  }
+
+  /// isProperSubClass - Returns true if RC has a legal super-class with more
+  /// allocatable registers.
+  ///
+  /// Register classes like GR32_NOSP are not proper sub-classes because %esp
+  /// is not allocatable.  Similarly, tGPR is not a proper sub-class in Thumb
+  /// mode because the GPR super-class is not legal.
+  bool isProperSubClass(const TargetRegisterClass *RC) const {
+    return get(RC).ProperSubClass;
+  }
+
+  /// getLastCalleeSavedAlias - Returns the last callee saved register that
+  /// overlaps PhysReg, or 0 if PhysReg does not overlap any callee saved
+  /// register.
+  unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
+    assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
+    if (PhysReg < CalleeSavedAliases.size())
+      return CalleeSavedAliases[PhysReg];
+    return 0;
+  }
+
+  /// Get the minimum register cost in RC's allocation order.
+  /// This is the smallest value returned by TRI->getCostPerUse(Reg) for all
+  /// the registers in getOrder(RC).
+  unsigned getMinCost(const TargetRegisterClass *RC) {
+    return get(RC).MinCost;
+  }
+
+  /// Get the position of the last cost change in getOrder(RC).
+  ///
+  /// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
+  /// same cost according to TRI->getCostPerUse().
+  unsigned getLastCostChange(const TargetRegisterClass *RC) {
+    return get(RC).LastCostChange;
+  }
+
+  /// Get the register unit limit for the given pressure set index.
+  ///
+  /// RegisterClassInfo adjusts this limit for reserved registers.
+  unsigned getRegPressureSetLimit(unsigned Idx) const {
+    if (!PSetLimits[Idx])
+      PSetLimits[Idx] = computePSetLimit(Idx);
+    return PSetLimits[Idx];
+  }
+
+protected:
+  unsigned computePSetLimit(unsigned Idx) const;
+};
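+
+// Illustrative sketch (assumed client code): refresh the cached information
+// once per function, then query allocation orders; MF, RC and RCI stand in
+// for client-owned objects.
+//
+//   RegisterClassInfo RCI;
+//   RCI.runOnMachineFunction(MF);
+//   for (MCPhysReg PhysReg : RCI.getOrder(RC)) {
+//     // Reserved registers are already filtered out; registers aliasing
+//     // callee saved registers come last in this order.
+//   }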
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERCLASSINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
new file mode 100644
index 0000000..2b14b78
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterPressure.h
@@ -0,0 +1,576 @@
+//===- RegisterPressure.h - Dynamic Register Pressure -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegisterPressure class which can be used to track
+// MachineInstr level register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
+#define LLVM_CODEGEN_REGISTERPRESSURE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <vector>
+
+namespace llvm {
+
+class LiveIntervals;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class RegisterClassInfo;
+
+struct RegisterMaskPair {
+  unsigned RegUnit; ///< Virtual register or register unit.
+  LaneBitmask LaneMask;
+
+  RegisterMaskPair(unsigned RegUnit, LaneBitmask LaneMask)
+      : RegUnit(RegUnit), LaneMask(LaneMask) {}
+};
+
+/// Base class for register pressure results.
+struct RegisterPressure {
+  /// Map of max reg pressure indexed by pressure set ID, not class ID.
+  std::vector<unsigned> MaxSetPressure;
+
+  /// List of live in virtual registers or physical register units.
+  SmallVector<RegisterMaskPair,8> LiveInRegs;
+  SmallVector<RegisterMaskPair,8> LiveOutRegs;
+
+  void dump(const TargetRegisterInfo *TRI) const;
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopIdx and BottomIdx.  During pressure computation, the maximum pressure per
+/// register pressure set is increased. Once pressure within a region is fully
+/// computed, the live-in and live-out sets are recorded.
+///
+/// This is preferable to RegionPressure when LiveIntervals are available,
+/// because delimiting regions by SlotIndex is more robust and convenient than
+/// holding block iterators. The block contents can change without invalidating
+/// the pressure result.
+struct IntervalPressure : RegisterPressure {
+  /// Record the boundary of the region being tracked.
+  SlotIndex TopIdx;
+  SlotIndex BottomIdx;
+
+  void reset();
+
+  void openTop(SlotIndex NextTop);
+
+  void openBottom(SlotIndex PrevBottom);
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
+/// use when LiveIntervals are unavailable.
+struct RegionPressure : RegisterPressure {
+  /// Record the boundary of the region being tracked.
+  MachineBasicBlock::const_iterator TopPos;
+  MachineBasicBlock::const_iterator BottomPos;
+
+  void reset();
+
+  void openTop(MachineBasicBlock::const_iterator PrevTop);
+
+  void openBottom(MachineBasicBlock::const_iterator PrevBottom);
+};
+
+/// Capture a change in pressure for a single pressure set. UnitInc may be
+/// expressed in terms of upward or downward pressure depending on the client
+/// and will be dynamically adjusted for current liveness.
+///
+/// Pressure increments are tiny, typically 1-2 units, and this is only for
+/// heuristics, so we don't check UnitInc overflow. Instead, we may have a
+/// higher level assert that pressure is consistent within a region. We also
+/// effectively ignore dead defs which don't affect heuristics much.
+class PressureChange {
+  uint16_t PSetID = 0; // ID+1. 0=Invalid.
+  int16_t UnitInc = 0;
+
+public:
+  PressureChange() = default;
+  PressureChange(unsigned id): PSetID(id + 1) {
+    assert(id < std::numeric_limits<uint16_t>::max() && "PSetID overflow.");
+  }
+
+  bool isValid() const { return PSetID > 0; }
+
+  unsigned getPSet() const {
+    assert(isValid() && "invalid PressureChange");
+    return PSetID - 1;
+  }
+
+  // If PSetID is invalid, return UINT16_MAX to give it lowest priority.
+  unsigned getPSetOrMax() const {
+    return (PSetID - 1) & std::numeric_limits<uint16_t>::max();
+  }
+
+  int getUnitInc() const { return UnitInc; }
+
+  void setUnitInc(int Inc) { UnitInc = Inc; }
+
+  bool operator==(const PressureChange &RHS) const {
+    return PSetID == RHS.PSetID && UnitInc == RHS.UnitInc;
+  }
+};
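+
+// Illustrative sketch of the ID+1 encoding: a default-constructed
+// PressureChange is invalid, and getPSetOrMax() maps it to UINT16_MAX so it
+// sorts with the lowest priority.
+//
+//   PressureChange Invalid;                // PSetID == 0, isValid() == false
+//   assert(Invalid.getPSetOrMax() == std::numeric_limits<uint16_t>::max());
+//   PressureChange PC(/*id=*/3);           // stored internally as PSetID == 4
+//   assert(PC.getPSet() == 3);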
+
+template <> struct isPodLike<PressureChange> {
+  static const bool value = true;
+};
+
+/// List of PressureChanges in order of increasing, unique PSetID.
+///
+/// Use a small fixed number, because we can fit more PressureChanges in an
+/// empty SmallVector than ever need to be tracked per register class. If more
+/// PSets are affected, then we only track the most constrained.
+class PressureDiff {
+  // The initial design was for MaxPSets=4, but that requires PSet partitions,
+  // which are not yet implemented. (PSet partitions are equivalent PSets given
+  // the register classes actually in use within the scheduling region.)
+  enum { MaxPSets = 16 };
+
+  PressureChange PressureChanges[MaxPSets];
+
+  using iterator = PressureChange *;
+
+  iterator nonconst_begin() { return &PressureChanges[0]; }
+  iterator nonconst_end() { return &PressureChanges[MaxPSets]; }
+
+public:
+  using const_iterator = const PressureChange *;
+
+  const_iterator begin() const { return &PressureChanges[0]; }
+  const_iterator end() const { return &PressureChanges[MaxPSets]; }
+
+  void addPressureChange(unsigned RegUnit, bool IsDec,
+                         const MachineRegisterInfo *MRI);
+
+  void dump(const TargetRegisterInfo &TRI) const;
+};
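+
+// Illustrative sketch (assumed client code): the changes are ordered by
+// PSetID and the list is terminated by the first invalid entry, so clients
+// typically stop iterating at it.
+//
+//   for (const PressureChange &PC : PDiff) {
+//     if (!PC.isValid())
+//       break;
+//     // ... use PC.getPSet() and PC.getUnitInc() ...
+//   }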
+
+/// List of registers defined and used by a machine instruction.
+class RegisterOperands {
+public:
+  /// List of virtual registers and register units read by the instruction.
+  SmallVector<RegisterMaskPair, 8> Uses;
+  /// \brief List of virtual registers and register units defined by the
+  /// instruction which are not dead.
+  SmallVector<RegisterMaskPair, 8> Defs;
+  /// \brief List of virtual registers and register units defined by the
+  /// instruction but dead.
+  SmallVector<RegisterMaskPair, 8> DeadDefs;
+
+  /// Analyze the given instruction \p MI and fill in the Uses, Defs and
+  /// DeadDefs list based on the MachineOperand flags.
+  void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI,
+               const MachineRegisterInfo &MRI, bool TrackLaneMasks,
+               bool IgnoreDead);
+
+  /// Use liveness information to find dead defs not marked with a dead flag
+  /// and move them to the DeadDefs vector.
+  void detectDeadDefs(const MachineInstr &MI, const LiveIntervals &LIS);
+
+  /// Use liveness information to find out which uses/defs are partially
+  /// undefined/dead and adjust the RegisterMaskPairs accordingly.
+  /// If \p AddFlagsMI is given then missing read-undef and dead flags will be
+  /// added to the instruction.
+  void adjustLaneLiveness(const LiveIntervals &LIS,
+                          const MachineRegisterInfo &MRI, SlotIndex Pos,
+                          MachineInstr *AddFlagsMI = nullptr);
+};
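+
+// Illustrative sketch (assumed client code): collect() is normally called
+// once per instruction before the operands are fed to a pressure tracker;
+// MI, TRI and MRI are the usual per-function objects.
+//
+//   RegisterOperands RegOpers;
+//   RegOpers.collect(MI, TRI, MRI, /*TrackLaneMasks=*/false,
+//                    /*IgnoreDead=*/false);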
+
+/// Array of PressureDiffs.
+class PressureDiffs {
+  PressureDiff *PDiffArray = nullptr;
+  unsigned Size = 0;
+  unsigned Max = 0;
+
+public:
+  PressureDiffs() = default;
+  ~PressureDiffs() { free(PDiffArray); }
+
+  void clear() { Size = 0; }
+
+  void init(unsigned N);
+
+  PressureDiff &operator[](unsigned Idx) {
+    assert(Idx < Size && "PressureDiff index out of bounds");
+    return PDiffArray[Idx];
+  }
+  const PressureDiff &operator[](unsigned Idx) const {
+    return const_cast<PressureDiffs*>(this)->operator[](Idx);
+  }
+
+  /// \brief Record pressure difference induced by the given operand list to
+  /// node with index \p Idx.
+  void addInstruction(unsigned Idx, const RegisterOperands &RegOpers,
+                      const MachineRegisterInfo &MRI);
+};
+
+/// Store the effects of a change in pressure on things that MI scheduler cares
+/// about.
+///
+/// Excess records the value of the largest difference in register units beyond
+/// the target's pressure limits across the affected pressure sets, where
+/// largest is defined as the absolute value of the difference. Negative
+/// ExcessUnits indicates a reduction in pressure that had already exceeded the
+/// target's limits.
+///
+/// CriticalMax records the largest increase in the tracker's max pressure that
+/// exceeds the critical limit for some pressure set determined by the client.
+///
+/// CurrentMax records the largest increase in the tracker's max pressure that
+/// exceeds the current limit for some pressure set determined by the client.
+struct RegPressureDelta {
+  PressureChange Excess;
+  PressureChange CriticalMax;
+  PressureChange CurrentMax;
+
+  RegPressureDelta() = default;
+
+  bool operator==(const RegPressureDelta &RHS) const {
+    return Excess == RHS.Excess && CriticalMax == RHS.CriticalMax
+      && CurrentMax == RHS.CurrentMax;
+  }
+  bool operator!=(const RegPressureDelta &RHS) const {
+    return !operator==(RHS);
+  }
+};
+
+/// A set of live virtual registers and physical register units.
+///
+/// This is a wrapper around a SparseSet which deals with mapping register unit
+/// and virtual register indexes to an index usable by the sparse set.
+class LiveRegSet {
+private:
+  struct IndexMaskPair {
+    unsigned Index;
+    LaneBitmask LaneMask;
+
+    IndexMaskPair(unsigned Index, LaneBitmask LaneMask)
+        : Index(Index), LaneMask(LaneMask) {}
+
+    unsigned getSparseSetIndex() const {
+      return Index;
+    }
+  };
+
+  using RegSet = SparseSet<IndexMaskPair>;
+  RegSet Regs;
+  unsigned NumRegUnits;
+
+  unsigned getSparseIndexFromReg(unsigned Reg) const {
+    if (TargetRegisterInfo::isVirtualRegister(Reg))
+      return TargetRegisterInfo::virtReg2Index(Reg) + NumRegUnits;
+    assert(Reg < NumRegUnits);
+    return Reg;
+  }
+
+  unsigned getRegFromSparseIndex(unsigned SparseIndex) const {
+    if (SparseIndex >= NumRegUnits)
+      return TargetRegisterInfo::index2VirtReg(SparseIndex-NumRegUnits);
+    return SparseIndex;
+  }
+
+public:
+  void clear();
+  void init(const MachineRegisterInfo &MRI);
+
+  LaneBitmask contains(unsigned Reg) const {
+    unsigned SparseIndex = getSparseIndexFromReg(Reg);
+    RegSet::const_iterator I = Regs.find(SparseIndex);
+    if (I == Regs.end())
+      return LaneBitmask::getNone();
+    return I->LaneMask;
+  }
+
+  /// Mark the \p Pair.LaneMask lanes of \p Pair.Reg as live.
+  /// Returns the previously live lanes of \p Pair.Reg.
+  LaneBitmask insert(RegisterMaskPair Pair) {
+    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
+    auto InsertRes = Regs.insert(IndexMaskPair(SparseIndex, Pair.LaneMask));
+    if (!InsertRes.second) {
+      LaneBitmask PrevMask = InsertRes.first->LaneMask;
+      InsertRes.first->LaneMask |= Pair.LaneMask;
+      return PrevMask;
+    }
+    return LaneBitmask::getNone();
+  }
+
+  /// Clears the \p Pair.LaneMask lanes of \p Pair.Reg (mark them as dead).
+  /// Returns the previously live lanes of \p Pair.Reg.
+  LaneBitmask erase(RegisterMaskPair Pair) {
+    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
+    RegSet::iterator I = Regs.find(SparseIndex);
+    if (I == Regs.end())
+      return LaneBitmask::getNone();
+    LaneBitmask PrevMask = I->LaneMask;
+    I->LaneMask &= ~Pair.LaneMask;
+    return PrevMask;
+  }
+
+  size_t size() const {
+    return Regs.size();
+  }
+
+  template<typename ContainerT>
+  void appendTo(ContainerT &To) const {
+    for (const IndexMaskPair &P : Regs) {
+      unsigned Reg = getRegFromSparseIndex(P.Index);
+      if (P.LaneMask.any())
+        To.push_back(RegisterMaskPair(Reg, P.LaneMask));
+    }
+  }
+};
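+
+// Illustrative sketch of the lane mask semantics: insert() unions the lanes
+// and returns what was live before, so a caller can detect a transition from
+// fully dead to live.
+//
+//   LaneBitmask Prev = LiveRegs.insert(RegisterMaskPair(Reg, Mask));
+//   if (Prev.none()) {
+//     // Reg had no live lanes before this insertion.
+//   }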
+
+/// Track the current register pressure at some position in the instruction
+/// stream, and remember the high water mark within the region traversed. This
+/// does not automatically consider live-through ranges. The client may
+/// independently adjust for global liveness.
+///
+/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
+/// be tracked across a larger region by storing a RegisterPressure result at
+/// each block boundary and explicitly adjusting pressure to account for block
+/// live-in and live-out register sets.
+///
+/// RegPressureTracker holds a reference to a RegisterPressure result that it
+/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
+/// is invalid until it reaches the end of the block or closeRegion() is
+/// explicitly called. Similarly, P.TopIdx is invalid during upward
+/// tracking. Changing direction has the side effect of closing the region, and
+/// traversing past TopIdx or BottomIdx reopens it.
+class RegPressureTracker {
+  const MachineFunction *MF = nullptr;
+  const TargetRegisterInfo *TRI = nullptr;
+  const RegisterClassInfo *RCI = nullptr;
+  const MachineRegisterInfo *MRI;
+  const LiveIntervals *LIS = nullptr;
+
+  /// We currently only allow pressure tracking within a block.
+  const MachineBasicBlock *MBB = nullptr;
+
+  /// Track the max pressure within the region traversed so far.
+  RegisterPressure &P;
+
+  /// Run in two modes depending on whether constructed with IntervalPressure
+  /// or RegionPressure. If RequireIntervals is false, LIS is ignored.
+  bool RequireIntervals;
+
+  /// True if UntiedDefs will be populated.
+  bool TrackUntiedDefs = false;
+
+  /// True if lanemasks should be tracked.
+  bool TrackLaneMasks = false;
+
+  /// Register pressure corresponds to liveness before this instruction
+  /// iterator. It may point to the end of the block or a DebugValue rather than
+  /// an instruction.
+  MachineBasicBlock::const_iterator CurrPos;
+
+  /// Pressure map indexed by pressure set ID, not class ID.
+  std::vector<unsigned> CurrSetPressure;
+
+  /// Set of live registers.
+  LiveRegSet LiveRegs;
+
+  /// Set of vreg defs that start a live range.
+  SparseSet<unsigned, VirtReg2IndexFunctor> UntiedDefs;
+  /// Live-through pressure.
+  std::vector<unsigned> LiveThruPressure;
+
+public:
+  RegPressureTracker(IntervalPressure &rp) : P(rp), RequireIntervals(true) {}
+  RegPressureTracker(RegionPressure &rp) : P(rp), RequireIntervals(false) {}
+
+  void reset();
+
+  void init(const MachineFunction *mf, const RegisterClassInfo *rci,
+            const LiveIntervals *lis, const MachineBasicBlock *mbb,
+            MachineBasicBlock::const_iterator pos,
+            bool TrackLaneMasks, bool TrackUntiedDefs);
+
+  /// Force liveness of virtual registers or physical register
+  /// units. Particularly useful to initialize the livein/out state of the
+  /// tracker before the first call to advance/recede.
+  void addLiveRegs(ArrayRef<RegisterMaskPair> Regs);
+
+  /// Get the MI position corresponding to this register pressure.
+  MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
+
+  // Reset the MI position corresponding to the register pressure. This allows
+  // schedulers to move instructions above the RegPressureTracker's
+  // CurrPos. Since the pressure is computed before CurrPos, the iterator
+  // position changes while pressure does not.
+  void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
+
+  /// Recede across the previous instruction.
+  void recede(SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);
+
+  /// Recede across the previous instruction.
+  /// This "low-level" variant assumes that recedeSkipDebugValues() was
+  /// called previously and takes precomputed RegisterOperands for the
+  /// instruction.
+  void recede(const RegisterOperands &RegOpers,
+              SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);
+
+  /// Recede until we find an instruction which is not a DebugValue.
+  void recedeSkipDebugValues();
+
+  /// Advance across the current instruction.
+  void advance();
+
+  /// Advance across the current instruction.
+  /// This is a "low-level" variant of advance() which takes precomputed
+  /// RegisterOperands of the instruction.
+  void advance(const RegisterOperands &RegOpers);
+
+  /// Finalize the region boundaries and record live-ins and live-outs.
+  void closeRegion();
+
+  /// Initialize the LiveThru pressure set based on the untied defs found in
+  /// RPTracker.
+  void initLiveThru(const RegPressureTracker &RPTracker);
+
+  /// Copy an existing live thru pressure result.
+  void initLiveThru(ArrayRef<unsigned> PressureSet) {
+    LiveThruPressure.assign(PressureSet.begin(), PressureSet.end());
+  }
+
+  ArrayRef<unsigned> getLiveThru() const { return LiveThruPressure; }
+
+  /// Get the resulting register pressure over the traversed region.
+  /// This result is complete if closeRegion() was explicitly invoked.
+  RegisterPressure &getPressure() { return P; }
+  const RegisterPressure &getPressure() const { return P; }
+
+  /// Get the register set pressure at the current position, which may be less
+  /// than the pressure across the traversed region.
+  const std::vector<unsigned> &getRegSetPressureAtPos() const {
+    return CurrSetPressure;
+  }
+
+  bool isTopClosed() const;
+  bool isBottomClosed() const;
+
+  void closeTop();
+  void closeBottom();
+
+  /// Consider the pressure increase caused by traversing this instruction
+  /// bottom-up. Find the pressure set with the most change beyond its pressure
+  /// limit based on the tracker's current pressure, and record the number of
+  /// excess register units of that pressure set introduced by this instruction.
+  void getMaxUpwardPressureDelta(const MachineInstr *MI,
+                                 PressureDiff *PDiff,
+                                 RegPressureDelta &Delta,
+                                 ArrayRef<PressureChange> CriticalPSets,
+                                 ArrayRef<unsigned> MaxPressureLimit);
+
+  void getUpwardPressureDelta(const MachineInstr *MI,
+                              /*const*/ PressureDiff &PDiff,
+                              RegPressureDelta &Delta,
+                              ArrayRef<PressureChange> CriticalPSets,
+                              ArrayRef<unsigned> MaxPressureLimit) const;
+
+  /// Consider the pressure increase caused by traversing this instruction
+  /// top-down. Find the pressure set with the most change beyond its pressure
+  /// limit based on the tracker's current pressure, and record the number of
+  /// excess register units of that pressure set introduced by this instruction.
+  void getMaxDownwardPressureDelta(const MachineInstr *MI,
+                                   RegPressureDelta &Delta,
+                                   ArrayRef<PressureChange> CriticalPSets,
+                                   ArrayRef<unsigned> MaxPressureLimit);
+
+  /// Find the pressure set with the most change beyond its pressure limit after
+  /// traversing this instruction either upward or downward depending on the
+  /// closed end of the current region.
+  void getMaxPressureDelta(const MachineInstr *MI,
+                           RegPressureDelta &Delta,
+                           ArrayRef<PressureChange> CriticalPSets,
+                           ArrayRef<unsigned> MaxPressureLimit) {
+    if (isTopClosed())
+      return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
+                                         MaxPressureLimit);
+
+    assert(isBottomClosed() && "Uninitialized pressure tracker");
+    return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
+                                     MaxPressureLimit);
+  }
+
+  /// Get the pressure of each PSet after traversing this instruction bottom-up.
+  void getUpwardPressure(const MachineInstr *MI,
+                         std::vector<unsigned> &PressureResult,
+                         std::vector<unsigned> &MaxPressureResult);
+
+  /// Get the pressure of each PSet after traversing this instruction top-down.
+  void getDownwardPressure(const MachineInstr *MI,
+                           std::vector<unsigned> &PressureResult,
+                           std::vector<unsigned> &MaxPressureResult);
+
+  void getPressureAfterInst(const MachineInstr *MI,
+                            std::vector<unsigned> &PressureResult,
+                            std::vector<unsigned> &MaxPressureResult) {
+    if (isTopClosed())
+      return getUpwardPressure(MI, PressureResult, MaxPressureResult);
+
+    assert(isBottomClosed() && "Uninitialized pressure tracker");
+    return getDownwardPressure(MI, PressureResult, MaxPressureResult);
+  }
+
+  bool hasUntiedDef(unsigned VirtReg) const {
+    return UntiedDefs.count(VirtReg);
+  }
+
+  void dump() const;
+
+protected:
+  /// Add Reg to the live out set and increase max pressure.
+  void discoverLiveOut(RegisterMaskPair Pair);
+  /// Add Reg to the live in set and increase max pressure.
+  void discoverLiveIn(RegisterMaskPair Pair);
+
+  /// \brief Get the SlotIndex for the first nondebug instruction including or
+  /// after the current position.
+  SlotIndex getCurrSlot() const;
+
+  void increaseRegPressure(unsigned RegUnit, LaneBitmask PreviousMask,
+                           LaneBitmask NewMask);
+  void decreaseRegPressure(unsigned RegUnit, LaneBitmask PreviousMask,
+                           LaneBitmask NewMask);
+
+  void bumpDeadDefs(ArrayRef<RegisterMaskPair> DeadDefs);
+
+  void bumpUpwardPressure(const MachineInstr *MI);
+  void bumpDownwardPressure(const MachineInstr *MI);
+
+  void discoverLiveInOrOut(RegisterMaskPair Pair,
+                           SmallVectorImpl<RegisterMaskPair> &LiveInOrOut);
+
+  LaneBitmask getLastUsedLanes(unsigned RegUnit, SlotIndex Pos) const;
+  LaneBitmask getLiveLanesAt(unsigned RegUnit, SlotIndex Pos) const;
+  LaneBitmask getLiveThroughAt(unsigned RegUnit, SlotIndex Pos) const;
+};
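+
+// Illustrative sketch (assumed client code) of bottom-up tracking: initialize
+// the tracker at the bottom of a block, recede over each instruction, then
+// close the region to finalize the live-in/live-out sets.
+//
+//   IntervalPressure Pressure;
+//   RegPressureTracker RPTracker(Pressure);
+//   RPTracker.init(&MF, &RCI, &LIS, &MBB, MBB.end(),
+//                  /*TrackLaneMasks=*/false, /*TrackUntiedDefs=*/false);
+//   while (RPTracker.getPos() != MBB.begin())
+//     RPTracker.recede();
+//   RPTracker.closeRegion();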
+
+void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
+                        const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERPRESSURE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
new file mode 100644
index 0000000..489c72b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterScavenging.h
@@ -0,0 +1,231 @@
+//===- RegisterScavenging.h - Machine register scavenging -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file declares the machine register scavenger class. It can provide
+/// information such as which registers are unused at any point in a machine
+/// basic block.
+/// It also provides a mechanism to make registers available by evicting them
+/// to spill slots.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
+#define LLVM_CODEGEN_REGISTERSCAVENGING_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/LaneBitmask.h"
+
+namespace llvm {
+
+class MachineInstr;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+class RegScavenger {
+  const TargetRegisterInfo *TRI;
+  const TargetInstrInfo *TII;
+  MachineRegisterInfo* MRI;
+  MachineBasicBlock *MBB = nullptr;
+  MachineBasicBlock::iterator MBBI;
+  unsigned NumRegUnits = 0;
+
+  /// True if RegScavenger is currently tracking the liveness of registers.
+  bool Tracking = false;
+
+  /// Information on scavenged registers (held in a spill slot).
+  struct ScavengedInfo {
+    ScavengedInfo(int FI = -1) : FrameIndex(FI) {}
+
+    /// A spill slot used for scavenging a register post register allocation.
+    int FrameIndex;
+
+    /// If non-zero, the specific register is currently being
+    /// scavenged. That is, it is spilled to this scavenging stack slot.
+    unsigned Reg = 0;
+
+    /// The instruction that restores the scavenged register from stack.
+    const MachineInstr *Restore = nullptr;
+  };
+
+  /// A vector of information on scavenged registers.
+  SmallVector<ScavengedInfo, 2> Scavenged;
+
+  LiveRegUnits LiveUnits;
+
+  // These BitVectors are only used internally to forward(). They are members
+  // to avoid frequent reallocations.
+  BitVector KillRegUnits, DefRegUnits;
+  BitVector TmpRegUnits;
+
+public:
+  RegScavenger() = default;
+
+  /// Start tracking liveness from the beginning of basic block \p MBB.
+  void enterBasicBlock(MachineBasicBlock &MBB);
+
+  /// Start tracking liveness from the end of basic block \p MBB.
+  /// Use backward() to move towards the beginning of the block. This is
+  /// preferred to enterBasicBlock() and forward() because it does not depend
+  /// on the presence of kill flags.
+  void enterBasicBlockEnd(MachineBasicBlock &MBB);
+
+  /// Move the internal MBB iterator and update register states.
+  void forward();
+
+  /// Move the internal MBB iterator and update register states until
+  /// it has processed the specified iterator.
+  void forward(MachineBasicBlock::iterator I) {
+    if (!Tracking && MBB->begin() != I) forward();
+    while (MBBI != I) forward();
+  }
+
+  /// Invert the behavior of forward() on the current instruction (undo the
+  /// changes to the available registers made by forward()).
+  void unprocess();
+
+  /// Unprocess instructions until you reach the provided iterator.
+  void unprocess(MachineBasicBlock::iterator I) {
+    while (MBBI != I) unprocess();
+  }
+
+  /// Update internal register state and move MBB iterator backwards.
+  /// Contrary to unprocess() this method gives precise results even in the
+  /// absence of kill flags.
+  void backward();
+
+  /// Call backward() as long as the internal iterator does not point to \p I.
+  void backward(MachineBasicBlock::iterator I) {
+    while (MBBI != I)
+      backward();
+  }
+
+  /// Move the internal MBB iterator but do not update register states.
+  void skipTo(MachineBasicBlock::iterator I) {
+    if (I == MachineBasicBlock::iterator(nullptr))
+      Tracking = false;
+    MBBI = I;
+  }
+
+  MachineBasicBlock::iterator getCurrentPosition() const { return MBBI; }
+
+  /// Return whether a specific register is currently used.
+  bool isRegUsed(unsigned Reg, bool includeReserved = true) const;
+
+  /// Return all the currently available registers in the register class \p RC
+  /// as a bit mask.
+  BitVector getRegsAvailable(const TargetRegisterClass *RC);
+
+  /// Find an unused register of the specified register class.
+  /// Return 0 if none is found.
+  unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
+
+  /// Add a scavenging frame index.
+  void addScavengingFrameIndex(int FI) {
+    Scavenged.push_back(ScavengedInfo(FI));
+  }
+
+  /// Query whether a frame index is a scavenging frame index.
+  bool isScavengingFrameIndex(int FI) const {
+    for (const ScavengedInfo &SI : Scavenged)
+      if (SI.FrameIndex == FI)
+        return true;
+
+    return false;
+  }
+
+  /// Get an array of scavenging frame indices.
+  void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
+    for (const ScavengedInfo &SI : Scavenged)
+      if (SI.FrameIndex >= 0)
+        A.push_back(SI.FrameIndex);
+  }
+
+  /// Make a register of the specified register class
+  /// available and do the appropriate bookkeeping. SPAdj is the stack
+  /// adjustment due to the call frame; it is passed along to
+  /// eliminateFrameIndex().
+  /// Returns the scavenged register.
+  /// This is deprecated as it depends on the quality of the kill flags being
+  /// present; use scavengeRegisterBackwards() instead!
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+                            MachineBasicBlock::iterator I, int SPAdj);
+  unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
+    return scavengeRegister(RegClass, MBBI, SPAdj);
+  }
+
+  /// Make a register of the specified register class available from the
+  /// current position backwards to the place before \p To. If \p RestoreAfter
+  /// is true this includes the instruction following the current position.
+  /// SPAdj is the stack adjustment due to the call frame; it is passed along
+  /// to eliminateFrameIndex().
+  /// Returns the scavenged register.
+  unsigned scavengeRegisterBackwards(const TargetRegisterClass &RC,
+                                     MachineBasicBlock::iterator To,
+                                     bool RestoreAfter, int SPAdj);
+
+  /// Tell the scavenger a register is used.
+  void setRegUsed(unsigned Reg, LaneBitmask LaneMask = LaneBitmask::getAll());
+
+private:
+  /// Returns true if a register is reserved. It is never "unused".
+  bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
+
+  /// setUsed / setUnused - Mark the state of one or more register units.
+  void setUsed(const BitVector &RegUnits) {
+    LiveUnits.addUnits(RegUnits);
+  }
+  void setUnused(const BitVector &RegUnits) {
+    LiveUnits.removeUnits(RegUnits);
+  }
+
+  /// Processes the current instruction and fills the KillRegUnits and
+  /// DefRegUnits bit vectors.
+  void determineKillsAndDefs();
+
+  /// Add all register units that \p Reg contains to \p BV.
+  void addRegUnits(BitVector &BV, unsigned Reg);
+
+  /// Remove all Reg Units that \p Reg contains from \p BV.
+  void removeRegUnits(BitVector &BV, unsigned Reg);
+
+  /// Return the candidate register that is unused for the longest after
+  /// StartMI. UseMI is set to the instruction where the search stopped.
+  ///
+  /// No more than InstrLimit instructions are inspected.
+  unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
+                           BitVector &Candidates,
+                           unsigned InstrLimit,
+                           MachineBasicBlock::iterator &UseMI);
+
+  /// Initialize RegisterScavenger.
+  void init(MachineBasicBlock &MBB);
+
+  /// Mark live-in registers of basic block as used.
+  void setLiveInsUsed(const MachineBasicBlock &MBB);
+
+  /// Spill a register after position \p After and reload it before position
+  /// \p UseMI.
+  ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
+                       MachineBasicBlock::iterator After,
+                       MachineBasicBlock::iterator &UseMI);
+};
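+
+// Illustrative sketch (assumed client code) of the preferred backward mode:
+// enter at the block end, walk backwards to the use, and scavenge a register
+// of class RC; RC, MI and SPAdj are client-provided.
+//
+//   RegScavenger RS;
+//   RS.enterBasicBlockEnd(MBB);
+//   RS.backward(MachineBasicBlock::iterator(MI));
+//   unsigned Reg = RS.scavengeRegisterBackwards(RC, MI.getIterator(),
+//                                               /*RestoreAfter=*/false,
+//                                               SPAdj);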
+
+/// Replaces all frame index virtual registers with physical registers. Uses the
+/// register scavenger to find an appropriate register to use.
+void scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_REGISTERSCAVENGING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
new file mode 100644
index 0000000..eabadd8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RegisterUsageInfo.h
@@ -0,0 +1,77 @@
+//==- RegisterUsageInfo.h - Register Usage Information Storage ---*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass is required to take advantage of the interprocedural register
+/// allocation infrastructure.
+///
+/// This is a simple immutable pass which keeps RegMasks (calculated based on
+/// the actual register allocation) for the functions in a module and provides
+/// a simple API to query this information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
+#define LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Pass.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+
+class PhysicalRegisterUsageInfo : public ImmutablePass {
+  virtual void anchor();
+
+public:
+  static char ID;
+
+  PhysicalRegisterUsageInfo() : ImmutablePass(ID) {
+    PassRegistry &Registry = *PassRegistry::getPassRegistry();
+    initializePhysicalRegisterUsageInfoPass(Registry);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+
+  /// Set the TargetMachine *, which is used to print the
+  /// analysis when the command line option -print-regusage is used.
+  void setTargetMachine(const TargetMachine *TM_) { TM = TM_; }
+
+  bool doInitialization(Module &M) override;
+
+  bool doFinalization(Module &M) override;
+
+  /// Store the RegMask for the given Function *.
+  void storeUpdateRegUsageInfo(const Function *FP,
+                               std::vector<uint32_t> RegMask);
+
+  /// Query the stored RegMask for the given Function *; returns nullptr if
+  /// the function is not known.
+  const std::vector<uint32_t> *getRegUsageInfo(const Function *FP);
+
+  void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+private:
+  /// A DenseMap from Function * to RegMask.
+  /// In a RegMask, a 0 bit means the register is used (clobbered) by the
+  /// function, and a 1 bit means the register's contents are preserved
+  /// across the function call.
+  DenseMap<const Function *, std::vector<uint32_t>> RegMasks;
+
+  const TargetMachine *TM;
+};
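+
+// Illustrative sketch (assumed client code): an interprocedural RA client
+// queries the stored mask for a callee before deciding what must be preserved
+// around a call site; PRUI and F are client-provided.
+//
+//   if (const std::vector<uint32_t> *Mask = PRUI.getRegUsageInfo(F)) {
+//     // A set bit means the corresponding register is preserved across
+//     // the call; a clear bit means it is clobbered.
+//   }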
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_PHYSICALREGISTERUSAGEINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
new file mode 100644
index 0000000..03166cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -0,0 +1,136 @@
+//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+
+namespace llvm {
+  class ResourcePriorityQueue;
+
+  /// Sorting functions for the Available queue.
+  struct resource_sort {
+    ResourcePriorityQueue *PQ;
+    explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
+
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
+
+  class ResourcePriorityQueue : public SchedulingPriorityQueue {
+    /// SUnits - The SUnits for the current graph.
+    std::vector<SUnit> *SUnits;
+
+    /// NumNodesSolelyBlocking - This vector contains, for every node in the
+    /// Queue, the number of nodes that the node is the sole unscheduled
+    /// predecessor for.  This is used as a tie-breaker heuristic for better
+    /// mobility.
+    std::vector<unsigned> NumNodesSolelyBlocking;
+
+    /// Queue - The queue.
+    std::vector<SUnit*> Queue;
+
+    /// RegPressure - Tracking current reg pressure per register class.
+    ///
+    std::vector<unsigned> RegPressure;
+
+    /// RegLimit - Tracking the number of allocatable registers per register
+    /// class.
+    std::vector<unsigned> RegLimit;
+
+    resource_sort Picker;
+    const TargetRegisterInfo *TRI;
+    const TargetLowering *TLI;
+    const TargetInstrInfo *TII;
+    const InstrItineraryData* InstrItins;
+    /// ResourcesModel - Represents VLIW state.
+    /// Not limited to VLIW targets per se, but assumes
+    /// a DFA is defined by the target.
+    std::unique_ptr<DFAPacketizer> ResourcesModel;
+
+    /// Resource model - packet/bundle model. Purely
+    /// internal at this time.
+    std::vector<SUnit*> Packet;
+
+    /// Heuristics for estimating register pressure.
+    unsigned ParallelLiveRanges;
+    int HorizontalVerticalBalance;
+
+  public:
+    ResourcePriorityQueue(SelectionDAGISel *IS);
+
+    bool isBottomUp() const override { return false; }
+
+    void initNodes(std::vector<SUnit> &sunits) override;
+
+    void addNode(const SUnit *SU) override {
+      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+    }
+
+    void updateNode(const SUnit *SU) override {}
+
+    void releaseState() override {
+      SUnits = nullptr;
+    }
+
+    unsigned getLatency(unsigned NodeNum) const {
+      assert(NodeNum < (*SUnits).size());
+      return (*SUnits)[NodeNum].getHeight();
+    }
+
+    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+      assert(NodeNum < NumNodesSolelyBlocking.size());
+      return NumNodesSolelyBlocking[NodeNum];
+    }
+
+    /// Single cost function reflecting benefit of scheduling SU
+    /// in the current cycle.
+    int SUSchedulingCost (SUnit *SU);
+
+    /// initNumRegDefsLeft - Determine the # of regs defined by this node.
+    ///
+    void initNumRegDefsLeft(SUnit *SU);
+    void updateNumRegDefsLeft(SUnit *SU);
+    int regPressureDelta(SUnit *SU, bool RawPressure = false);
+    int rawRegPressureDelta (SUnit *SU, unsigned RCId);
+
+    bool empty() const override { return Queue.empty(); }
+
+    void push(SUnit *U) override;
+
+    SUnit *pop() override;
+
+    void remove(SUnit *SU) override;
+
+    /// scheduledNode - Main resource tracking point.
+    void scheduledNode(SUnit *Node) override;
+    bool isResourceAvailable(SUnit *SU);
+    void reserveResources(SUnit *SU);
+
+private:
+    void adjustPriorityOfUnscheduledPreds(SUnit *SU);
+    SUnit *getSingleUnscheduledPred(SUnit *SU);
+    unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
+    unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
+  };
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
new file mode 100644
index 0000000..7ed90d9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.def
@@ -0,0 +1,527 @@
+//===-- llvm/RuntimeLibcalls.def - File that describes libcalls -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the runtime library calls the backend can emit.
+// The various long double types cannot be merged, because 80-bit library
+// functions use "xf" and 128-bit use "tf".
+//
+// When adding PPCF128 functions here, note that their names generally need
+// to be overridden for Darwin with the xxx$LDBL128 form.  See
+// PPCISelLowering.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+
+// Declare the enumerator for each libcall, along with its default name. Some
+// libcalls have different names on particular OSes or architectures. These
+// are set in InitLibcallNames() in TargetLoweringBase.cpp and/or by targets
+// using TargetLoweringBase::setLibcallName()
+#ifndef HANDLE_LIBCALL
+#error "HANDLE_LIBCALL must be defined"
+#endif
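+
+// A typical expansion of this file (a sketch mirroring how the RTLIB::Libcall
+// enum is generated in RuntimeLibcalls.h):
+//
+//   enum Libcall {
+//   #define HANDLE_LIBCALL(code, name) code,
+//   #include "llvm/CodeGen/RuntimeLibcalls.def"
+//   #undef HANDLE_LIBCALL
+//   };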
+
+// Integer
+HANDLE_LIBCALL(SHL_I16, "__ashlhi3")
+HANDLE_LIBCALL(SHL_I32, "__ashlsi3")
+HANDLE_LIBCALL(SHL_I64, "__ashldi3")
+HANDLE_LIBCALL(SHL_I128, "__ashlti3")
+HANDLE_LIBCALL(SRL_I16, "__lshrhi3")
+HANDLE_LIBCALL(SRL_I32, "__lshrsi3")
+HANDLE_LIBCALL(SRL_I64, "__lshrdi3")
+HANDLE_LIBCALL(SRL_I128, "__lshrti3")
+HANDLE_LIBCALL(SRA_I16, "__ashrhi3")
+HANDLE_LIBCALL(SRA_I32, "__ashrsi3")
+HANDLE_LIBCALL(SRA_I64, "__ashrdi3")
+HANDLE_LIBCALL(SRA_I128, "__ashrti3")
+HANDLE_LIBCALL(MUL_I8, "__mulqi3")
+HANDLE_LIBCALL(MUL_I16, "__mulhi3")
+HANDLE_LIBCALL(MUL_I32, "__mulsi3")
+HANDLE_LIBCALL(MUL_I64, "__muldi3")
+HANDLE_LIBCALL(MUL_I128, "__multi3")
+HANDLE_LIBCALL(MULO_I32, "__mulosi4")
+HANDLE_LIBCALL(MULO_I64, "__mulodi4")
+HANDLE_LIBCALL(MULO_I128, "__muloti4")
+HANDLE_LIBCALL(SDIV_I8, "__divqi3")
+HANDLE_LIBCALL(SDIV_I16, "__divhi3")
+HANDLE_LIBCALL(SDIV_I32, "__divsi3")
+HANDLE_LIBCALL(SDIV_I64, "__divdi3")
+HANDLE_LIBCALL(SDIV_I128, "__divti3")
+HANDLE_LIBCALL(UDIV_I8, "__udivqi3")
+HANDLE_LIBCALL(UDIV_I16, "__udivhi3")
+HANDLE_LIBCALL(UDIV_I32, "__udivsi3")
+HANDLE_LIBCALL(UDIV_I64, "__udivdi3")
+HANDLE_LIBCALL(UDIV_I128, "__udivti3")
+HANDLE_LIBCALL(SREM_I8, "__modqi3")
+HANDLE_LIBCALL(SREM_I16, "__modhi3")
+HANDLE_LIBCALL(SREM_I32, "__modsi3")
+HANDLE_LIBCALL(SREM_I64, "__moddi3")
+HANDLE_LIBCALL(SREM_I128, "__modti3")
+HANDLE_LIBCALL(UREM_I8, "__umodqi3")
+HANDLE_LIBCALL(UREM_I16, "__umodhi3")
+HANDLE_LIBCALL(UREM_I32, "__umodsi3")
+HANDLE_LIBCALL(UREM_I64, "__umoddi3")
+HANDLE_LIBCALL(UREM_I128, "__umodti3")
+HANDLE_LIBCALL(SDIVREM_I8, nullptr)
+HANDLE_LIBCALL(SDIVREM_I16, nullptr)
+HANDLE_LIBCALL(SDIVREM_I32, nullptr)
+HANDLE_LIBCALL(SDIVREM_I64, nullptr)
+HANDLE_LIBCALL(SDIVREM_I128, nullptr)
+HANDLE_LIBCALL(UDIVREM_I8, nullptr)
+HANDLE_LIBCALL(UDIVREM_I16, nullptr)
+HANDLE_LIBCALL(UDIVREM_I32, nullptr)
+HANDLE_LIBCALL(UDIVREM_I64, nullptr)
+HANDLE_LIBCALL(UDIVREM_I128, nullptr)
+HANDLE_LIBCALL(NEG_I32, "__negsi2")
+HANDLE_LIBCALL(NEG_I64, "__negdi2")
+
+// Floating-point
+HANDLE_LIBCALL(ADD_F32, "__addsf3")
+HANDLE_LIBCALL(ADD_F64, "__adddf3")
+HANDLE_LIBCALL(ADD_F80, "__addxf3")
+HANDLE_LIBCALL(ADD_F128, "__addtf3")
+HANDLE_LIBCALL(ADD_PPCF128, "__gcc_qadd")
+HANDLE_LIBCALL(SUB_F32, "__subsf3")
+HANDLE_LIBCALL(SUB_F64, "__subdf3")
+HANDLE_LIBCALL(SUB_F80, "__subxf3")
+HANDLE_LIBCALL(SUB_F128, "__subtf3")
+HANDLE_LIBCALL(SUB_PPCF128, "__gcc_qsub")
+HANDLE_LIBCALL(MUL_F32, "__mulsf3")
+HANDLE_LIBCALL(MUL_F64, "__muldf3")
+HANDLE_LIBCALL(MUL_F80, "__mulxf3")
+HANDLE_LIBCALL(MUL_F128, "__multf3")
+HANDLE_LIBCALL(MUL_PPCF128, "__gcc_qmul")
+HANDLE_LIBCALL(DIV_F32, "__divsf3")
+HANDLE_LIBCALL(DIV_F64, "__divdf3")
+HANDLE_LIBCALL(DIV_F80, "__divxf3")
+HANDLE_LIBCALL(DIV_F128, "__divtf3")
+HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
+HANDLE_LIBCALL(REM_F32, "fmodf")
+HANDLE_LIBCALL(REM_F64, "fmod")
+HANDLE_LIBCALL(REM_F80, "fmodl")
+HANDLE_LIBCALL(REM_F128, "fmodl")
+HANDLE_LIBCALL(REM_PPCF128, "fmodl")
+HANDLE_LIBCALL(FMA_F32, "fmaf")
+HANDLE_LIBCALL(FMA_F64, "fma")
+HANDLE_LIBCALL(FMA_F80, "fmal")
+HANDLE_LIBCALL(FMA_F128, "fmal")
+HANDLE_LIBCALL(FMA_PPCF128, "fmal")
+HANDLE_LIBCALL(POWI_F32, "__powisf2")
+HANDLE_LIBCALL(POWI_F64, "__powidf2")
+HANDLE_LIBCALL(POWI_F80, "__powixf2")
+HANDLE_LIBCALL(POWI_F128, "__powitf2")
+HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
+HANDLE_LIBCALL(SQRT_F32, "sqrtf")
+HANDLE_LIBCALL(SQRT_F64, "sqrt")
+HANDLE_LIBCALL(SQRT_F80, "sqrtl")
+HANDLE_LIBCALL(SQRT_F128, "sqrtl")
+HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
+HANDLE_LIBCALL(LOG_F32, "logf")
+HANDLE_LIBCALL(LOG_F64, "log")
+HANDLE_LIBCALL(LOG_F80, "logl")
+HANDLE_LIBCALL(LOG_F128, "logl")
+HANDLE_LIBCALL(LOG_PPCF128, "logl")
+HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
+HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
+HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
+HANDLE_LIBCALL(LOG2_F32, "log2f")
+HANDLE_LIBCALL(LOG2_F64, "log2")
+HANDLE_LIBCALL(LOG2_F80, "log2l")
+HANDLE_LIBCALL(LOG2_F128, "log2l")
+HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
+HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
+HANDLE_LIBCALL(LOG10_F32, "log10f")
+HANDLE_LIBCALL(LOG10_F64, "log10")
+HANDLE_LIBCALL(LOG10_F80, "log10l")
+HANDLE_LIBCALL(LOG10_F128, "log10l")
+HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
+HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
+HANDLE_LIBCALL(EXP_F32, "expf")
+HANDLE_LIBCALL(EXP_F64, "exp")
+HANDLE_LIBCALL(EXP_F80, "expl")
+HANDLE_LIBCALL(EXP_F128, "expl")
+HANDLE_LIBCALL(EXP_PPCF128, "expl")
+HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
+HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
+HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
+HANDLE_LIBCALL(EXP2_F32, "exp2f")
+HANDLE_LIBCALL(EXP2_F64, "exp2")
+HANDLE_LIBCALL(EXP2_F80, "exp2l")
+HANDLE_LIBCALL(EXP2_F128, "exp2l")
+HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
+HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
+HANDLE_LIBCALL(SIN_F32, "sinf")
+HANDLE_LIBCALL(SIN_F64, "sin")
+HANDLE_LIBCALL(SIN_F80, "sinl")
+HANDLE_LIBCALL(SIN_F128, "sinl")
+HANDLE_LIBCALL(SIN_PPCF128, "sinl")
+HANDLE_LIBCALL(COS_F32, "cosf")
+HANDLE_LIBCALL(COS_F64, "cos")
+HANDLE_LIBCALL(COS_F80, "cosl")
+HANDLE_LIBCALL(COS_F128, "cosl")
+HANDLE_LIBCALL(COS_PPCF128, "cosl")
+HANDLE_LIBCALL(SINCOS_F32, nullptr)
+HANDLE_LIBCALL(SINCOS_F64, nullptr)
+HANDLE_LIBCALL(SINCOS_F80, nullptr)
+HANDLE_LIBCALL(SINCOS_F128, nullptr)
+HANDLE_LIBCALL(SINCOS_PPCF128, nullptr)
+HANDLE_LIBCALL(SINCOS_STRET_F32, nullptr)
+HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
+HANDLE_LIBCALL(POW_F32, "powf")
+HANDLE_LIBCALL(POW_F64, "pow")
+HANDLE_LIBCALL(POW_F80, "powl")
+HANDLE_LIBCALL(POW_F128, "powl")
+HANDLE_LIBCALL(POW_PPCF128, "powl")
+HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
+HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
+HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
+HANDLE_LIBCALL(CEIL_F32, "ceilf")
+HANDLE_LIBCALL(CEIL_F64, "ceil")
+HANDLE_LIBCALL(CEIL_F80, "ceill")
+HANDLE_LIBCALL(CEIL_F128, "ceill")
+HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
+HANDLE_LIBCALL(TRUNC_F32, "truncf")
+HANDLE_LIBCALL(TRUNC_F64, "trunc")
+HANDLE_LIBCALL(TRUNC_F80, "truncl")
+HANDLE_LIBCALL(TRUNC_F128, "truncl")
+HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
+HANDLE_LIBCALL(RINT_F32, "rintf")
+HANDLE_LIBCALL(RINT_F64, "rint")
+HANDLE_LIBCALL(RINT_F80, "rintl")
+HANDLE_LIBCALL(RINT_F128, "rintl")
+HANDLE_LIBCALL(RINT_PPCF128, "rintl")
+HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
+HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
+HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
+HANDLE_LIBCALL(ROUND_F32, "roundf")
+HANDLE_LIBCALL(ROUND_F64, "round")
+HANDLE_LIBCALL(ROUND_F80, "roundl")
+HANDLE_LIBCALL(ROUND_F128, "roundl")
+HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
+HANDLE_LIBCALL(FLOOR_F32, "floorf")
+HANDLE_LIBCALL(FLOOR_F64, "floor")
+HANDLE_LIBCALL(FLOOR_F80, "floorl")
+HANDLE_LIBCALL(FLOOR_F128, "floorl")
+HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
+HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
+HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
+HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
+HANDLE_LIBCALL(FMIN_F32, "fminf")
+HANDLE_LIBCALL(FMIN_F64, "fmin")
+HANDLE_LIBCALL(FMIN_F80, "fminl")
+HANDLE_LIBCALL(FMIN_F128, "fminl")
+HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
+HANDLE_LIBCALL(FMAX_F32, "fmaxf")
+HANDLE_LIBCALL(FMAX_F64, "fmax")
+HANDLE_LIBCALL(FMAX_F80, "fmaxl")
+HANDLE_LIBCALL(FMAX_F128, "fmaxl")
+HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
+
+// Conversion
+HANDLE_LIBCALL(FPEXT_F32_PPCF128, "__gcc_stoq")
+HANDLE_LIBCALL(FPEXT_F64_PPCF128, "__gcc_dtoq")
+HANDLE_LIBCALL(FPEXT_F80_F128, "__extendxftf2")
+HANDLE_LIBCALL(FPEXT_F64_F128, "__extenddftf2")
+HANDLE_LIBCALL(FPEXT_F32_F128, "__extendsftf2")
+HANDLE_LIBCALL(FPEXT_F32_F64, "__extendsfdf2")
+HANDLE_LIBCALL(FPEXT_F16_F32, "__gnu_h2f_ieee")
+HANDLE_LIBCALL(FPROUND_F32_F16, "__gnu_f2h_ieee")
+HANDLE_LIBCALL(FPROUND_F64_F16, "__truncdfhf2")
+HANDLE_LIBCALL(FPROUND_F80_F16, "__truncxfhf2")
+HANDLE_LIBCALL(FPROUND_F128_F16, "__trunctfhf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F16, "__trunctfhf2")
+HANDLE_LIBCALL(FPROUND_F64_F32, "__truncdfsf2")
+HANDLE_LIBCALL(FPROUND_F80_F32, "__truncxfsf2")
+HANDLE_LIBCALL(FPROUND_F128_F32, "__trunctfsf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F32, "__gcc_qtos")
+HANDLE_LIBCALL(FPROUND_F80_F64, "__truncxfdf2")
+HANDLE_LIBCALL(FPROUND_F128_F64, "__trunctfdf2")
+HANDLE_LIBCALL(FPROUND_PPCF128_F64, "__gcc_qtod")
+HANDLE_LIBCALL(FPROUND_F128_F80, "__trunctfxf2")
+HANDLE_LIBCALL(FPTOSINT_F32_I32, "__fixsfsi")
+HANDLE_LIBCALL(FPTOSINT_F32_I64, "__fixsfdi")
+HANDLE_LIBCALL(FPTOSINT_F32_I128, "__fixsfti")
+HANDLE_LIBCALL(FPTOSINT_F64_I32, "__fixdfsi")
+HANDLE_LIBCALL(FPTOSINT_F64_I64, "__fixdfdi")
+HANDLE_LIBCALL(FPTOSINT_F64_I128, "__fixdfti")
+HANDLE_LIBCALL(FPTOSINT_F80_I32, "__fixxfsi")
+HANDLE_LIBCALL(FPTOSINT_F80_I64, "__fixxfdi")
+HANDLE_LIBCALL(FPTOSINT_F80_I128, "__fixxfti")
+HANDLE_LIBCALL(FPTOSINT_F128_I32, "__fixtfsi")
+HANDLE_LIBCALL(FPTOSINT_F128_I64, "__fixtfdi")
+HANDLE_LIBCALL(FPTOSINT_F128_I128, "__fixtfti")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I32, "__gcc_qtou")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I64, "__fixtfdi")
+HANDLE_LIBCALL(FPTOSINT_PPCF128_I128, "__fixtfti")
+HANDLE_LIBCALL(FPTOUINT_F32_I32, "__fixunssfsi")
+HANDLE_LIBCALL(FPTOUINT_F32_I64, "__fixunssfdi")
+HANDLE_LIBCALL(FPTOUINT_F32_I128, "__fixunssfti")
+HANDLE_LIBCALL(FPTOUINT_F64_I32, "__fixunsdfsi")
+HANDLE_LIBCALL(FPTOUINT_F64_I64, "__fixunsdfdi")
+HANDLE_LIBCALL(FPTOUINT_F64_I128, "__fixunsdfti")
+HANDLE_LIBCALL(FPTOUINT_F80_I32, "__fixunsxfsi")
+HANDLE_LIBCALL(FPTOUINT_F80_I64, "__fixunsxfdi")
+HANDLE_LIBCALL(FPTOUINT_F80_I128, "__fixunsxfti")
+HANDLE_LIBCALL(FPTOUINT_F128_I32, "__fixunstfsi")
+HANDLE_LIBCALL(FPTOUINT_F128_I64, "__fixunstfdi")
+HANDLE_LIBCALL(FPTOUINT_F128_I128, "__fixunstfti")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I32, "__fixunstfsi")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I64, "__fixunstfdi")
+HANDLE_LIBCALL(FPTOUINT_PPCF128_I128, "__fixunstfti")
+HANDLE_LIBCALL(SINTTOFP_I32_F32, "__floatsisf")
+HANDLE_LIBCALL(SINTTOFP_I32_F64, "__floatsidf")
+HANDLE_LIBCALL(SINTTOFP_I32_F80, "__floatsixf")
+HANDLE_LIBCALL(SINTTOFP_I32_F128, "__floatsitf")
+HANDLE_LIBCALL(SINTTOFP_I32_PPCF128, "__gcc_itoq")
+HANDLE_LIBCALL(SINTTOFP_I64_F32, "__floatdisf")
+HANDLE_LIBCALL(SINTTOFP_I64_F64, "__floatdidf")
+HANDLE_LIBCALL(SINTTOFP_I64_F80, "__floatdixf")
+HANDLE_LIBCALL(SINTTOFP_I64_F128, "__floatditf")
+HANDLE_LIBCALL(SINTTOFP_I64_PPCF128, "__floatditf")
+HANDLE_LIBCALL(SINTTOFP_I128_F32, "__floattisf")
+HANDLE_LIBCALL(SINTTOFP_I128_F64, "__floattidf")
+HANDLE_LIBCALL(SINTTOFP_I128_F80, "__floattixf")
+HANDLE_LIBCALL(SINTTOFP_I128_F128, "__floattitf")
+HANDLE_LIBCALL(SINTTOFP_I128_PPCF128, "__floattitf")
+HANDLE_LIBCALL(UINTTOFP_I32_F32, "__floatunsisf")
+HANDLE_LIBCALL(UINTTOFP_I32_F64, "__floatunsidf")
+HANDLE_LIBCALL(UINTTOFP_I32_F80, "__floatunsixf")
+HANDLE_LIBCALL(UINTTOFP_I32_F128, "__floatunsitf")
+HANDLE_LIBCALL(UINTTOFP_I32_PPCF128, "__gcc_utoq")
+HANDLE_LIBCALL(UINTTOFP_I64_F32, "__floatundisf")
+HANDLE_LIBCALL(UINTTOFP_I64_F64, "__floatundidf")
+HANDLE_LIBCALL(UINTTOFP_I64_F80, "__floatundixf")
+HANDLE_LIBCALL(UINTTOFP_I64_F128, "__floatunditf")
+HANDLE_LIBCALL(UINTTOFP_I64_PPCF128, "__floatunditf")
+HANDLE_LIBCALL(UINTTOFP_I128_F32, "__floatuntisf")
+HANDLE_LIBCALL(UINTTOFP_I128_F64, "__floatuntidf")
+HANDLE_LIBCALL(UINTTOFP_I128_F80, "__floatuntixf")
+HANDLE_LIBCALL(UINTTOFP_I128_F128, "__floatuntitf")
+HANDLE_LIBCALL(UINTTOFP_I128_PPCF128, "__floatuntitf")
+
+// Comparison
+HANDLE_LIBCALL(OEQ_F32, "__eqsf2")
+HANDLE_LIBCALL(OEQ_F64, "__eqdf2")
+HANDLE_LIBCALL(OEQ_F128, "__eqtf2")
+HANDLE_LIBCALL(OEQ_PPCF128, "__gcc_qeq")
+HANDLE_LIBCALL(UNE_F32, "__nesf2")
+HANDLE_LIBCALL(UNE_F64, "__nedf2")
+HANDLE_LIBCALL(UNE_F128, "__netf2")
+HANDLE_LIBCALL(UNE_PPCF128, "__gcc_qne")
+HANDLE_LIBCALL(OGE_F32, "__gesf2")
+HANDLE_LIBCALL(OGE_F64, "__gedf2")
+HANDLE_LIBCALL(OGE_F128, "__getf2")
+HANDLE_LIBCALL(OGE_PPCF128, "__gcc_qge")
+HANDLE_LIBCALL(OLT_F32, "__ltsf2")
+HANDLE_LIBCALL(OLT_F64, "__ltdf2")
+HANDLE_LIBCALL(OLT_F128, "__lttf2")
+HANDLE_LIBCALL(OLT_PPCF128, "__gcc_qlt")
+HANDLE_LIBCALL(OLE_F32, "__lesf2")
+HANDLE_LIBCALL(OLE_F64, "__ledf2")
+HANDLE_LIBCALL(OLE_F128, "__letf2")
+HANDLE_LIBCALL(OLE_PPCF128, "__gcc_qle")
+HANDLE_LIBCALL(OGT_F32, "__gtsf2")
+HANDLE_LIBCALL(OGT_F64, "__gtdf2")
+HANDLE_LIBCALL(OGT_F128, "__gttf2")
+HANDLE_LIBCALL(OGT_PPCF128, "__gcc_qgt")
+HANDLE_LIBCALL(UO_F32, "__unordsf2")
+HANDLE_LIBCALL(UO_F64, "__unorddf2")
+HANDLE_LIBCALL(UO_F128, "__unordtf2")
+HANDLE_LIBCALL(UO_PPCF128, "__gcc_qunord")
+HANDLE_LIBCALL(O_F32, "__unordsf2")
+HANDLE_LIBCALL(O_F64, "__unorddf2")
+HANDLE_LIBCALL(O_F128, "__unordtf2")
+HANDLE_LIBCALL(O_PPCF128, "__gcc_qunord")
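+// Note: in the libgcc soft-float convention these comparison routines return
+// an int that the caller compares against zero (e.g. __eqsf2 returns 0 iff
+// its operands compare equal, and the __unord* routines return nonzero iff
+// either operand is NaN); the "__gcc_q*" PPCF128 variants follow the same
+// pattern.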
+
+// Memory
+HANDLE_LIBCALL(MEMCPY, "memcpy")
+HANDLE_LIBCALL(MEMMOVE, "memmove")
+HANDLE_LIBCALL(MEMSET, "memset")
+HANDLE_LIBCALL(BZERO, nullptr)
+
+// Element-wise unordered-atomic memory intrinsics of different element sizes
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memcpy_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memcpy_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memcpy_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memcpy_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memcpy_element_unordered_atomic_16")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memmove_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memmove_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memmove_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memmove_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memmove_element_unordered_atomic_16")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memset_element_unordered_atomic_1")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memset_element_unordered_atomic_2")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memset_element_unordered_atomic_4")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memset_element_unordered_atomic_8")
+HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memset_element_unordered_atomic_16")
+
+// Exception handling
+HANDLE_LIBCALL(UNWIND_RESUME, "_Unwind_Resume")
+
+// Note: there are two sets of atomics libcalls; see
+// <https://llvm.org/docs/Atomics.html> for more info on the
+// difference between them.
+
+// Atomic '__sync_*' libcalls.
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_1, "__sync_val_compare_and_swap_1")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_2, "__sync_val_compare_and_swap_2")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_4, "__sync_val_compare_and_swap_4")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_8, "__sync_val_compare_and_swap_8")
+HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_16, "__sync_val_compare_and_swap_16")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_1, "__sync_lock_test_and_set_1")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_2, "__sync_lock_test_and_set_2")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_4, "__sync_lock_test_and_set_4")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_8, "__sync_lock_test_and_set_8")
+HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_16, "__sync_lock_test_and_set_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_1, "__sync_fetch_and_max_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_2, "__sync_fetch_and_max_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_4, "__sync_fetch_and_max_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_8, "__sync_fetch_and_max_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_16, "__sync_fetch_and_max_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_1, "__sync_fetch_and_umax_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_2, "__sync_fetch_and_umax_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_4, "__sync_fetch_and_umax_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_8, "__sync_fetch_and_umax_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_16, "__sync_fetch_and_umax_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_1, "__sync_fetch_and_min_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_2, "__sync_fetch_and_min_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_4, "__sync_fetch_and_min_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_8, "__sync_fetch_and_min_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_16, "__sync_fetch_and_min_16")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_1, "__sync_fetch_and_umin_1")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_2, "__sync_fetch_and_umin_2")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_4, "__sync_fetch_and_umin_4")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_8, "__sync_fetch_and_umin_8")
+HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_16, "__sync_fetch_and_umin_16")
+
+// Atomic '__atomic_*' libcalls.
+HANDLE_LIBCALL(ATOMIC_LOAD, "__atomic_load")
+HANDLE_LIBCALL(ATOMIC_LOAD_1, "__atomic_load_1")
+HANDLE_LIBCALL(ATOMIC_LOAD_2, "__atomic_load_2")
+HANDLE_LIBCALL(ATOMIC_LOAD_4, "__atomic_load_4")
+HANDLE_LIBCALL(ATOMIC_LOAD_8, "__atomic_load_8")
+HANDLE_LIBCALL(ATOMIC_LOAD_16, "__atomic_load_16")
+
+HANDLE_LIBCALL(ATOMIC_STORE, "__atomic_store")
+HANDLE_LIBCALL(ATOMIC_STORE_1, "__atomic_store_1")
+HANDLE_LIBCALL(ATOMIC_STORE_2, "__atomic_store_2")
+HANDLE_LIBCALL(ATOMIC_STORE_4, "__atomic_store_4")
+HANDLE_LIBCALL(ATOMIC_STORE_8, "__atomic_store_8")
+HANDLE_LIBCALL(ATOMIC_STORE_16, "__atomic_store_16")
+
+HANDLE_LIBCALL(ATOMIC_EXCHANGE, "__atomic_exchange")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_1, "__atomic_exchange_1")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_2, "__atomic_exchange_2")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_4, "__atomic_exchange_4")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_8, "__atomic_exchange_8")
+HANDLE_LIBCALL(ATOMIC_EXCHANGE_16, "__atomic_exchange_16")
+
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE, "__atomic_compare_exchange")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_1, "__atomic_compare_exchange_1")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_2, "__atomic_compare_exchange_2")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_4, "__atomic_compare_exchange_4")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_8, "__atomic_compare_exchange_8")
+HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_16, "__atomic_compare_exchange_16")
+
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_1, "__atomic_fetch_add_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_2, "__atomic_fetch_add_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_4, "__atomic_fetch_add_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_8, "__atomic_fetch_add_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_ADD_16, "__atomic_fetch_add_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_1, "__atomic_fetch_sub_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_2, "__atomic_fetch_sub_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_4, "__atomic_fetch_sub_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_8, "__atomic_fetch_sub_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_SUB_16, "__atomic_fetch_sub_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_2, "__atomic_fetch_and_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_4, "__atomic_fetch_and_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_8, "__atomic_fetch_and_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_AND_16, "__atomic_fetch_and_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_2, "__atomic_fetch_or_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_4, "__atomic_fetch_or_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_8, "__atomic_fetch_or_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_OR_16, "__atomic_fetch_or_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_1, "__atomic_fetch_xor_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_2, "__atomic_fetch_xor_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_4, "__atomic_fetch_xor_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_8, "__atomic_fetch_xor_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_XOR_16, "__atomic_fetch_xor_16")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_1, "__atomic_fetch_nand_1")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_2, "__atomic_fetch_nand_2")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_4, "__atomic_fetch_nand_4")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_8, "__atomic_fetch_nand_8")
+HANDLE_LIBCALL(ATOMIC_FETCH_NAND_16, "__atomic_fetch_nand_16")
+
+// Stack Protector Fail
+HANDLE_LIBCALL(STACKPROTECTOR_CHECK_FAIL, "__stack_chk_fail")
+
+// Deoptimization
+HANDLE_LIBCALL(DEOPTIMIZE, "__llvm_deoptimize")
+
+HANDLE_LIBCALL(UNKNOWN_LIBCALL, nullptr)
+
+#undef HANDLE_LIBCALL
diff --git a/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
new file mode 100644
index 0000000..016bef1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -0,0 +1,82 @@
+//===-- CodeGen/RuntimeLibcalls.h - Runtime Library Calls -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enum representing the list of runtime library calls
+// the backend may emit during code generation, and also some helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
+#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+namespace RTLIB {
+  /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
+  /// the backend can emit.  The various long double types cannot be merged,
+  /// because 80-bit library functions use "xf" and 128-bit use "tf".
+  ///
+  /// When adding PPCF128 functions here, note that their names generally need
+  /// to be overridden for Darwin with the xxx$LDBL128 form.  See
+  /// PPCISelLowering.cpp.
+  ///
+  enum Libcall {
+#define HANDLE_LIBCALL(code, name) code,
+#include "RuntimeLibcalls.def"
+#undef HANDLE_LIBCALL
+  };
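+  // RuntimeLibcalls.def is an X-macro file, so the same include can expand
+  // into other parallel tables. As an illustrative sketch (not part of this
+  // header), a table of default libcall names could be generated the same
+  // way:
+  //
+  //   static const char *LibcallNames[RTLIB::UNKNOWN_LIBCALL + 1] = {
+  //   #define HANDLE_LIBCALL(code, name) name,
+  //   #include "RuntimeLibcalls.def"
+  //   #undef HANDLE_LIBCALL
+  //   };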
+
+  /// getFPEXT - Return the FPEXT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPEXT(EVT OpVT, EVT RetVT);
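+  // For example (illustrative), getFPEXT(MVT::f32, MVT::f64) yields
+  // FPEXT_F32_F64 ("__extendsfdf2"), while an unsupported pairing yields
+  // UNKNOWN_LIBCALL.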
+
+  /// getFPROUND - Return the FPROUND_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPROUND(EVT OpVT, EVT RetVT);
+
+  /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPTOSINT(EVT OpVT, EVT RetVT);
+
+  /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getFPTOUINT(EVT OpVT, EVT RetVT);
+
+  /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getSINTTOFP(EVT OpVT, EVT RetVT);
+
+  /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getUINTTOFP(EVT OpVT, EVT RetVT);
+
+  /// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getSYNC(unsigned Opc, MVT VT);
+
+  /// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return the
+  /// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+  /// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return the
+  /// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+  /// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return the
+  /// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size, or
+  /// UNKNOWN_LIBCALL if there is none.
+  Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+}
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td b/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td
new file mode 100644
index 0000000..83bbab2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SDNodeProperties.td
@@ -0,0 +1,34 @@
+//===- SDNodeProperties.td - Common code for DAG isels ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class SDNodeProperty;
+
+// Selection DAG Pattern Operations
+class SDPatternOperator {
+  list<SDNodeProperty> Properties = [];
+}
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node Properties.
+//
+// Note: These are hard coded into tblgen.
+//
+def SDNPCommutative : SDNodeProperty;   // X op Y == Y op X
+def SDNPAssociative : SDNodeProperty;   // (X op Y) op Z == X op (Y op Z)
+def SDNPHasChain    : SDNodeProperty;   // R/W chain operand and result
+def SDNPOutGlue     : SDNodeProperty;   // Write a flag result
+def SDNPInGlue      : SDNodeProperty;   // Read a flag operand
+def SDNPOptInGlue   : SDNodeProperty;   // Optionally read a flag operand
+def SDNPMayStore    : SDNodeProperty;   // May write to memory, sets 'mayStore'.
+def SDNPMayLoad     : SDNodeProperty;   // May read memory, sets 'mayLoad'.
+def SDNPSideEffect  : SDNodeProperty;   // Sets 'HasUnmodelledSideEffects'.
+def SDNPMemOperand  : SDNodeProperty;   // Touches memory, has assoc MemOperand
+def SDNPVariadic    : SDNodeProperty;   // Node has variable arguments.
+def SDNPWantRoot    : SDNodeProperty;   // ComplexPattern gets the root of match
+def SDNPWantParent  : SDNodeProperty;   // ComplexPattern gets the parent
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
new file mode 100644
index 0000000..f3f2f05
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAG.h
@@ -0,0 +1,764 @@
+//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements the ScheduleDAG class, which is used as the common base
+/// class for instruction schedulers. This encapsulates the scheduling DAG,
+/// which is shared between SelectionDAG and MachineInstr scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
+#define LLVM_CODEGEN_SCHEDULEDAG_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+template<class Graph> class GraphWriter;
+class MachineFunction;
+class MachineRegisterInfo;
+class MCInstrDesc;
+struct MCSchedClassDesc;
+class ScheduleDAG;
+class SDNode;
+class SUnit;
+class TargetInstrInfo;
+class TargetMachine;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+  /// Scheduling dependency. This represents one direction of an edge in the
+  /// scheduling DAG.
+  class SDep {
+  public:
+    /// These are the different kinds of scheduling dependencies.
+    enum Kind {
+      Data,        ///< Regular data dependence (aka true-dependence).
+      Anti,        ///< A register anti-dependence (aka WAR).
+      Output,      ///< A register output-dependence (aka WAW).
+      Order        ///< Any other ordering dependency.
+    };
+
+    // Strong dependencies must be respected by the scheduler. Artificial
+    // dependencies may be removed only if they are redundant with another
+    // strong dependence.
+    //
+    // Weak dependencies may be violated by the scheduling strategy, but only if
+    // the strategy can prove it is correct to do so.
+    //
+    // Strong OrderKinds must occur before "Weak".
+    // Weak OrderKinds must occur after "Weak".
+    enum OrderKind {
+      Barrier,      ///< An unknown scheduling barrier.
+      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
+      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
+      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
+      Weak,         ///< Arbitrary weak DAG edge.
+      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
+    };
+
+  private:
+    /// \brief A pointer to the depending/depended-on SUnit, and an enum
+    /// indicating the kind of the dependency.
+    PointerIntPair<SUnit *, 2, Kind> Dep;
+
+    /// A union discriminated by the dependence kind.
+    union {
+      /// For Data, Anti, and Output dependencies, the associated register. For
+      /// Data dependencies that don't currently have a register assigned, this
+      /// is set to zero.
+      unsigned Reg;
+
+      /// Additional information about Order dependencies.
+      unsigned OrdKind; // enum OrderKind
+    } Contents;
+
+    /// The time associated with this edge. Often this is just the value of the
+    /// Latency field of the predecessor, however advanced models may provide
+    /// additional information about specific edges.
+    unsigned Latency;
+
+  public:
+    /// Constructs a null SDep. This is only for use by container classes which
+    /// require default constructors. SUnits may not have null SDep edges.
+    SDep() : Dep(nullptr, Data) {}
+
+    /// Constructs an SDep with the specified values.
+    SDep(SUnit *S, Kind kind, unsigned Reg)
+      : Dep(S, kind), Contents() {
+      switch (kind) {
+      default:
+        llvm_unreachable("Reg given for non-register dependence!");
+      case Anti:
+      case Output:
+        assert(Reg != 0 &&
+               "SDep::Anti and SDep::Output must use a non-zero Reg!");
+        Contents.Reg = Reg;
+        Latency = 0;
+        break;
+      case Data:
+        Contents.Reg = Reg;
+        Latency = 1;
+        break;
+      }
+    }
+
+    SDep(SUnit *S, OrderKind kind)
+      : Dep(S, Order), Contents(), Latency(0) {
+      Contents.OrdKind = kind;
+    }
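+    // Illustrative only (DefSU, StoreSU, and SomeReg are hypothetical
+    // placeholders): a register data edge and a may-alias ordering edge would
+    // be constructed as
+    //   SDep DataEdge(DefSU, SDep::Data, /*Reg=*/SomeReg);
+    //   SDep ChainEdge(StoreSU, SDep::MayAliasMem);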
+
+    /// Returns true if the specified SDep is equivalent except for latency.
+    bool overlaps(const SDep &Other) const;
+
+    bool operator==(const SDep &Other) const {
+      return overlaps(Other) && Latency == Other.Latency;
+    }
+
+    bool operator!=(const SDep &Other) const {
+      return !operator==(Other);
+    }
+
+    /// \brief Returns the latency value for this edge, which roughly means the
+    /// minimum number of cycles that must elapse between the predecessor and
+    /// the successor, given that they have this edge between them.
+    unsigned getLatency() const {
+      return Latency;
+    }
+
+    /// Sets the latency for this edge.
+    void setLatency(unsigned Lat) {
+      Latency = Lat;
+    }
+
+    /// Returns the SUnit to which this edge points.
+    SUnit *getSUnit() const;
+
+    /// Assigns the SUnit to which this edge points.
+    void setSUnit(SUnit *SU);
+
+    /// Returns an enum value representing the kind of the dependence.
+    Kind getKind() const;
+
+    /// Shorthand for getKind() != SDep::Data.
+    bool isCtrl() const {
+      return getKind() != Data;
+    }
+
+    /// \brief Tests if this is an Order dependence between two memory accesses
+    /// where both sides of the dependence access memory in non-volatile and
+    /// fully modeled ways.
+    bool isNormalMemory() const {
+      return getKind() == Order && (Contents.OrdKind == MayAliasMem
+                                    || Contents.OrdKind == MustAliasMem);
+    }
+
+    /// Tests if this is an Order dependence that is marked as a barrier.
+    bool isBarrier() const {
+      return getKind() == Order && Contents.OrdKind == Barrier;
+    }
+
+    /// Tests if this could be any kind of memory dependence.
+    bool isNormalMemoryOrBarrier() const {
+      return (isNormalMemory() || isBarrier());
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as
+    /// "must alias", meaning that the SUnits at either end of the edge have a
+    /// memory dependence on a known memory location.
+    bool isMustAlias() const {
+      return getKind() == Order && Contents.OrdKind == MustAliasMem;
+    }
+
+    /// Tests if this is a weak dependence. Weak dependencies are considered DAG
+    /// edges for height computation and other heuristics, but do not force
+    /// ordering. Breaking a weak edge may require the scheduler to compensate,
+    /// for example by inserting a copy.
+    bool isWeak() const {
+      return getKind() == Order && Contents.OrdKind >= Weak;
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as
+    /// "artificial", meaning it isn't necessary for correctness.
+    bool isArtificial() const {
+      return getKind() == Order && Contents.OrdKind == Artificial;
+    }
+
+    /// \brief Tests if this is an Order dependence that is marked as "cluster",
+    /// meaning it is artificial and wants to be adjacent.
+    bool isCluster() const {
+      return getKind() == Order && Contents.OrdKind == Cluster;
+    }
+
+    /// Tests if this is a Data dependence that is associated with a register.
+    bool isAssignedRegDep() const {
+      return getKind() == Data && Contents.Reg != 0;
+    }
+
+    /// Returns the register associated with this edge. This is only valid on
+    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
+    /// meaning there is no associated register.
+    unsigned getReg() const {
+      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+             "getReg called on non-register dependence edge!");
+      return Contents.Reg;
+    }
+
+    /// Assigns the associated register for this edge. This is only valid on
+    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
+    /// not be zero. On Data edges, the value may be zero, which would mean that
+    /// no specific register is associated with this edge.
+    void setReg(unsigned Reg) {
+      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+             "setReg called on non-register dependence edge!");
+      assert((getKind() != Anti || Reg != 0) &&
+             "SDep::Anti edge cannot use the zero register!");
+      assert((getKind() != Output || Reg != 0) &&
+             "SDep::Output edge cannot use the zero register!");
+      Contents.Reg = Reg;
+    }
+
+    raw_ostream &print(raw_ostream &O,
+                       const TargetRegisterInfo *TRI = nullptr) const;
+  };
+
+  template <>
+  struct isPodLike<SDep> { static const bool value = true; };
+
+  /// Scheduling unit. This is a node in the scheduling DAG.
+  class SUnit {
+  private:
+    enum : unsigned { BoundaryID = ~0u };
+
+    SDNode *Node = nullptr;        ///< Representative node.
+    MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
+
+  public:
+    SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
+                               ///< was cloned. (SD scheduling only)
+
+    const MCSchedClassDesc *SchedClass =
+        nullptr; ///< nullptr or resolved SchedClass.
+
+    SmallVector<SDep, 4> Preds;  ///< All SUnit predecessors.
+    SmallVector<SDep, 4> Succs;  ///< All SUnit successors.
+
+    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
+    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
+    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
+    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
+
+    unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
+    unsigned NodeQueueId = 0;          ///< Queue id of node.
+    unsigned NumPreds = 0;             ///< # of SDep::Data preds.
+    unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
+    unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
+    unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
+    unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
+    unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
+    unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
+    unsigned short Latency = 0;        ///< Node latency.
+    bool isVRegCycle      : 1;         ///< May use and def the same vreg.
+    bool isCall           : 1;         ///< Is a function call.
+    bool isCallOp         : 1;         ///< Is a function call operand.
+    bool isTwoAddress     : 1;         ///< Is a two-address instruction.
+    bool isCommutable     : 1;         ///< Is a commutable instruction.
+    bool hasPhysRegUses   : 1;         ///< Has physreg uses.
+    bool hasPhysRegDefs   : 1;         ///< Has physreg defs that are being used.
+    bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
+    bool isPending        : 1;         ///< True once pending.
+    bool isAvailable      : 1;         ///< True once available.
+    bool isScheduled      : 1;         ///< True once scheduled.
+    bool isScheduleHigh   : 1;         ///< True if preferable to schedule high.
+    bool isScheduleLow    : 1;         ///< True if preferable to schedule low.
+    bool isCloned         : 1;         ///< True if this node has been cloned.
+    bool isUnbuffered     : 1;         ///< Uses an unbuffered resource.
+    bool hasReservedResource : 1;      ///< Uses a reserved resource.
+    Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.
+
+  private:
+    bool isDepthCurrent   : 1;         ///< True if Depth is current.
+    bool isHeightCurrent  : 1;         ///< True if Height is current.
+    unsigned Depth = 0;                ///< Node depth.
+    unsigned Height = 0;               ///< Node height.
+
+  public:
+    unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
+    unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.
+
+    const TargetRegisterClass *CopyDstRC =
+        nullptr; ///< Is a special copy node if != nullptr.
+    const TargetRegisterClass *CopySrcRC = nullptr;
+
+    /// \brief Constructs an SUnit for pre-regalloc scheduling to represent an
+    /// SDNode and any nodes flagged to it.
+    SUnit(SDNode *node, unsigned nodenum)
+      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
+        isCallOp(false), isTwoAddress(false), isCommutable(false),
+        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+        isPending(false), isAvailable(false), isScheduled(false),
+        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
+        isHeightCurrent(false) {}
+
+    /// \brief Constructs an SUnit for post-regalloc scheduling to represent a
+    /// MachineInstr.
+    SUnit(MachineInstr *instr, unsigned nodenum)
+      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
+        isCallOp(false), isTwoAddress(false), isCommutable(false),
+        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+        isPending(false), isAvailable(false), isScheduled(false),
+        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
+        isHeightCurrent(false) {}
+
+    /// \brief Constructs a placeholder SUnit.
+    SUnit()
+      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
+        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
+        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
+        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
+        isCloned(false), isUnbuffered(false), hasReservedResource(false),
+        isDepthCurrent(false), isHeightCurrent(false) {}
+
+    /// \brief Boundary nodes are placeholders for the boundary of the
+    /// scheduling region.
+    ///
+    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
+    /// correspond to schedulable entities (e.g. instructions) and do not have a
+    /// valid ID. Consequently, always check for boundary nodes before accessing
+    /// an associative data structure keyed on node ID.
+    bool isBoundaryNode() const { return NodeNum == BoundaryID; }
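+    // Hypothetical guard for a NodeNum-keyed table, per the warning above:
+    //   if (!SU->isBoundaryNode())
+    //     consult(Table[SU->NodeNum]);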
+
+    /// Assigns the representative SDNode for this SUnit. This may be used
+    /// during pre-regalloc scheduling.
+    void setNode(SDNode *N) {
+      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
+      Node = N;
+    }
+
+    /// Returns the representative SDNode for this SUnit. This may be used
+    /// during pre-regalloc scheduling.
+    SDNode *getNode() const {
+      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
+      return Node;
+    }
+
+    /// \brief Returns true if this SUnit refers to a machine instruction as
+    /// opposed to an SDNode.
+    bool isInstr() const { return Instr; }
+
+    /// Assigns the instruction for the SUnit. This may be used during
+    /// post-regalloc scheduling.
+    void setInstr(MachineInstr *MI) {
+      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
+      Instr = MI;
+    }
+
+    /// Returns the representative MachineInstr for this SUnit. This may be used
+    /// during post-regalloc scheduling.
+    MachineInstr *getInstr() const {
+      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
+      return Instr;
+    }
+
+    /// Adds the specified edge as a pred of the current node if not already.
+    /// It also adds the current node as a successor of the specified node.
+    bool addPred(const SDep &D, bool Required = true);
+
+    /// \brief Adds a barrier edge to SU by calling addPred(), with latency 0
+    /// generally or latency 1 for a store followed by a load.
+    bool addPredBarrier(SUnit *SU) {
+      SDep Dep(SU, SDep::Barrier);
+      unsigned TrueMemOrderLatency =
+        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
+      Dep.setLatency(TrueMemOrderLatency);
+      return addPred(Dep);
+    }
+
+    /// Removes the specified edge as a pred of the current node if it exists.
+    /// It also removes the current node as a successor of the specified node.
+    void removePred(const SDep &D);
+
+    /// Returns the depth of this node, which is the length of the maximum path
+    /// up to any node which has no predecessors.
+    unsigned getDepth() const {
+      if (!isDepthCurrent)
+        const_cast<SUnit *>(this)->ComputeDepth();
+      return Depth;
+    }
+
+    /// \brief Returns the height of this node, which is the length of the
+    /// maximum path down to any node which has no successors.
+    unsigned getHeight() const {
+      if (!isHeightCurrent)
+        const_cast<SUnit *>(this)->ComputeHeight();
+      return Height;
+    }
+
+    /// \brief If NewDepth is greater than this node's depth value, sets it to
+    /// be the new depth value. This also recursively marks successor nodes
+    /// dirty.
+    void setDepthToAtLeast(unsigned NewDepth);
+
+    /// \brief If NewHeight is greater than this node's height value, sets it
+    /// to be the new height value. This also recursively marks predecessor
+    /// nodes dirty.
+    void setHeightToAtLeast(unsigned NewHeight);
+
+    /// \brief Sets a flag in this node to indicate that its stored Depth value
+    /// will require recomputation the next time getDepth() is called.
+    void setDepthDirty();
+
+    /// \brief Sets a flag in this node to indicate that its stored Height value
+    /// will require recomputation the next time getHeight() is called.
+    void setHeightDirty();
+
+    /// Tests if node N is a predecessor of this node.
+    bool isPred(const SUnit *N) const {
+      for (const SDep &Pred : Preds)
+        if (Pred.getSUnit() == N)
+          return true;
+      return false;
+    }
+
+    /// Tests if node N is a successor of this node.
+    bool isSucc(const SUnit *N) const {
+      for (const SDep &Succ : Succs)
+        if (Succ.getSUnit() == N)
+          return true;
+      return false;
+    }
+
+    bool isTopReady() const {
+      return NumPredsLeft == 0;
+    }
+    bool isBottomReady() const {
+      return NumSuccsLeft == 0;
+    }
+
+    /// \brief Orders this node's predecessor edges such that the critical path
+    /// edge occurs first.
+    void biasCriticalPath();
+
+    void dump(const ScheduleDAG *G) const;
+    void dumpAll(const ScheduleDAG *G) const;
+    raw_ostream &print(raw_ostream &O,
+                       const SUnit *N = nullptr,
+                       const SUnit *X = nullptr) const;
+    raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
+
+  private:
+    void ComputeDepth();
+    void ComputeHeight();
+  };
+
+  /// Returns true if the specified SDep is equivalent except for latency.
+  inline bool SDep::overlaps(const SDep &Other) const {
+    if (Dep != Other.Dep)
+      return false;
+    switch (Dep.getInt()) {
+    case Data:
+    case Anti:
+    case Output:
+      return Contents.Reg == Other.Contents.Reg;
+    case Order:
+      return Contents.OrdKind == Other.Contents.OrdKind;
+    }
+    llvm_unreachable("Invalid dependency kind!");
+  }
+
+  /// Returns the SUnit to which this edge points.
+  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }
+
+  /// Assigns the SUnit to which this edge points.
+  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }
+
+  /// Returns an enum value representing the kind of the dependence.
+  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
+
+  //===--------------------------------------------------------------------===//
+
+  /// \brief This interface is used to plug different priorities computation
+  /// algorithms into the list scheduler. It implements the interface of a
+  /// standard priority queue, where nodes are inserted in arbitrary order and
+  /// returned in priority order.  The computation of the priority and the
+  /// representation of the queue are totally up to the implementation to
+  /// decide.
+  class SchedulingPriorityQueue {
+    virtual void anchor();
+
+    unsigned CurCycle = 0;
+    bool HasReadyFilter;
+
+  public:
+    SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}
+
+    virtual ~SchedulingPriorityQueue() = default;
+
+    virtual bool isBottomUp() const = 0;
+
+    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
+    virtual void addNode(const SUnit *SU) = 0;
+    virtual void updateNode(const SUnit *SU) = 0;
+    virtual void releaseState() = 0;
+
+    virtual bool empty() const = 0;
+
+    bool hasReadyFilter() const { return HasReadyFilter; }
+
+    virtual bool tracksRegPressure() const { return false; }
+
+    virtual bool isReady(SUnit *) const {
+      assert(!HasReadyFilter && "The ready filter must override isReady()");
+      return true;
+    }
+
+    virtual void push(SUnit *U) = 0;
+
+    void push_all(const std::vector<SUnit *> &Nodes) {
+      for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
+           E = Nodes.end(); I != E; ++I)
+        push(*I);
+    }
+
+    virtual SUnit *pop() = 0;
+
+    virtual void remove(SUnit *SU) = 0;
+
+    virtual void dump(ScheduleDAG *) const {}
+
+    /// As each node is scheduled, this method is invoked.  This allows the
+    /// priority function to adjust the priority of related unscheduled nodes,
+    /// for example.
+    virtual void scheduledNode(SUnit *) {}
+
+    virtual void unscheduledNode(SUnit *) {}
+
+    void setCurCycle(unsigned Cycle) {
+      CurCycle = Cycle;
+    }
+
+    unsigned getCurCycle() const {
+      return CurCycle;
+    }
+  };
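+  // A concrete strategy subclasses this interface. As a hedged sketch, a
+  // minimal bottom-up queue might keep a vector of SUnit pointers ordered by
+  // SUnit::getHeight() in push(), hand out the best candidate from pop(), and
+  // leave hooks such as updateNode() and scheduledNode() as no-ops; the real
+  // list schedulers layer register-pressure heuristics on top of this.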
+
+  class ScheduleDAG {
+  public:
+    const TargetMachine &TM;            ///< Target processor
+    const TargetInstrInfo *TII;         ///< Target instruction information
+    const TargetRegisterInfo *TRI;      ///< Target processor register info
+    MachineFunction &MF;                ///< Machine function
+    MachineRegisterInfo &MRI;           ///< Virtual/real register map
+    std::vector<SUnit> SUnits;          ///< The scheduling units.
+    SUnit EntrySU;                      ///< Special node for the region entry.
+    SUnit ExitSU;                       ///< Special node for the region exit.
+
+#ifdef NDEBUG
+    static const bool StressSched = false;
+#else
+    bool StressSched;
+#endif
+
+    explicit ScheduleDAG(MachineFunction &mf);
+
+    virtual ~ScheduleDAG();
+
+    /// Clears the DAG state (between regions).
+    void clearDAG();
+
+    /// Returns the MCInstrDesc of this SUnit.
+    /// Returns NULL for SDNodes without a machine opcode.
+    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
+      if (SU->isInstr()) return &SU->getInstr()->getDesc();
+      return getNodeDesc(SU->getNode());
+    }
+
+    /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
+    virtual void viewGraph(const Twine &Name, const Twine &Title);
+    virtual void viewGraph();
+
+    virtual void dumpNode(const SUnit *SU) const = 0;
+
+    /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
+    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
+
+    /// Returns a label for the region of code covered by the DAG.
+    virtual std::string getDAGName() const = 0;
+
+    /// Adds custom features for a visualization of the ScheduleDAG.
+    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
+
+#ifndef NDEBUG
+    /// \brief Verifies that all SUnits were scheduled and that their state is
+    /// consistent. Returns the number of scheduled SUnits.
+    unsigned VerifyScheduledDAG(bool isBottomUp);
+#endif
+
+  private:
+    /// Returns the MCInstrDesc of this SDNode or NULL.
+    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
+  };
+
+  class SUnitIterator : public std::iterator<std::forward_iterator_tag,
+                                             SUnit, ptrdiff_t> {
+    SUnit *Node;
+    unsigned Operand;
+
+    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
+
+  public:
+    bool operator==(const SUnitIterator& x) const {
+      return Operand == x.Operand;
+    }
+    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
+
+    pointer operator*() const {
+      return Node->Preds[Operand].getSUnit();
+    }
+    pointer operator->() const { return operator*(); }
+
+    SUnitIterator& operator++() {                // Preincrement
+      ++Operand;
+      return *this;
+    }
+    SUnitIterator operator++(int) { // Postincrement
+      SUnitIterator tmp = *this; ++*this; return tmp;
+    }
+
+    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
+    static SUnitIterator end  (SUnit *N) {
+      return SUnitIterator(N, (unsigned)N->Preds.size());
+    }
+
+    unsigned getOperand() const { return Operand; }
+    const SUnit *getNode() const { return Node; }
+
+    /// Tests if this is not an SDep::Data dependence.
+    bool isCtrlDep() const {
+      return getSDep().isCtrl();
+    }
+    bool isArtificialDep() const {
+      return getSDep().isArtificial();
+    }
+    const SDep &getSDep() const {
+      return Node->Preds[Operand];
+    }
+  };
+
+  template <> struct GraphTraits<SUnit*> {
+    typedef SUnit *NodeRef;
+    typedef SUnitIterator ChildIteratorType;
+    static NodeRef getEntryNode(SUnit *N) { return N; }
+    static ChildIteratorType child_begin(NodeRef N) {
+      return SUnitIterator::begin(N);
+    }
+    static ChildIteratorType child_end(NodeRef N) {
+      return SUnitIterator::end(N);
+    }
+  };
+
+  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
+    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
+    static nodes_iterator nodes_begin(ScheduleDAG *G) {
+      return nodes_iterator(G->SUnits.begin());
+    }
+    static nodes_iterator nodes_end(ScheduleDAG *G) {
+      return nodes_iterator(G->SUnits.end());
+    }
+  };
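+  // These GraphTraits specializations let the generic graph utilities walk
+  // the scheduling DAG; for example (illustrative), the iterators in
+  // llvm/ADT/DepthFirstIterator.h can traverse an SUnit's predecessor tree
+  // via df_begin(SU)/df_end(SU), and viewGraph() above relies on the
+  // ScheduleDAG* specialization.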
+
+  /// This class can compute a topological ordering for SUnits and provides
+  /// methods for dynamically updating the ordering as new edges are added.
+  ///
+  /// This allows a very fast implementation of IsReachable, for example.
+  class ScheduleDAGTopologicalSort {
+    /// A reference to the ScheduleDAG's SUnits.
+    std::vector<SUnit> &SUnits;
+    SUnit *ExitSU;
+
+    /// Maps topological index to the node number.
+    std::vector<int> Index2Node;
+    /// Maps the node number to its topological index.
+    std::vector<int> Node2Index;
+    /// A set of nodes visited during a DFS traversal.
+    BitVector Visited;
+
+    /// Performs a DFS traversal and marks all nodes affected by the edge
+    /// insertion. These nodes will later get new topological indexes by means
+    /// of the Shift method.
+    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
+
+    /// \brief Reassigns topological indexes for the nodes in the DAG to
+    /// preserve the topological ordering.
+    void Shift(BitVector& Visited, int LowerBound, int UpperBound);
+
+    /// Assigns the topological index to the node n.
+    void Allocate(int n, int index);
+
+  public:
+    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
+
+    /// Creates the initial topological ordering from the DAG to be scheduled.
+    void InitDAGTopologicalSorting();
+
+    /// Returns an array of SUs that are both in the successor
+    /// subtree of StartSU and in the predecessor subtree of TargetSU.
+    /// StartSU and TargetSU are not in the array.
+    /// Success is false if TargetSU is not in the successor subtree of
+    /// StartSU, else it is true.
+    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
+                                 bool &Success);
+
+    /// Checks if \p SU is reachable from \p TargetSU.
+    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
+
+    /// Returns true if addPred(TargetSU, SU) creates a cycle.
+    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
+
+    /// \brief Updates the topological ordering to accommodate an edge to be
+    /// added from SUnit \p X to SUnit \p Y.
+    void AddPred(SUnit *Y, SUnit *X);
+
+    /// \brief Updates the topological ordering to accommodate the removal of
+    /// an edge from the specified node \p N, a predecessor of the current
+    /// node \p M.
+    void RemovePred(SUnit *M, SUnit *N);
+
+    typedef std::vector<int>::iterator iterator;
+    typedef std::vector<int>::const_iterator const_iterator;
+    iterator begin() { return Index2Node.begin(); }
+    const_iterator begin() const { return Index2Node.begin(); }
+    iterator end() { return Index2Node.end(); }
+    const_iterator end() const { return Index2Node.end(); }
+
+    typedef std::vector<int>::reverse_iterator reverse_iterator;
+    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
+    reverse_iterator rbegin() { return Index2Node.rbegin(); }
+    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
+    reverse_iterator rend() { return Index2Node.rend(); }
+    const_reverse_iterator rend() const { return Index2Node.rend(); }
+  };
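+  // Typical (illustrative) pattern when mutating the DAG: initialize once,
+  // query before adding an edge, then keep the ordering in sync, e.g.
+  //   ScheduleDAGTopologicalSort Topo(SUnits, ExitSU);
+  //   Topo.InitDAGTopologicalSorting();
+  //   if (!Topo.WillCreateCycle(TargetSU, SU)) {
+  //     TargetSU->addPred(SDep(SU, SDep::Artificial));
+  //     Topo.AddPred(TargetSU, SU);
+  //   }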
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
new file mode 100644
index 0000000..1488220
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -0,0 +1,384 @@
+//===- ScheduleDAGInstrs.h - MachineInstr Scheduling ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements the ScheduleDAGInstrs class, which implements scheduling
+/// for a MachineInstr-based dependency graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+#define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseMultiSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/MC/LaneBitmask.h"
+#include <cassert>
+#include <cstdint>
+#include <list>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+  class LiveIntervals;
+  class MachineFrameInfo;
+  class MachineFunction;
+  class MachineInstr;
+  class MachineLoopInfo;
+  class MachineOperand;
+  struct MCSchedClassDesc;
+  class PressureDiffs;
+  class PseudoSourceValue;
+  class RegPressureTracker;
+  class UndefValue;
+  class Value;
+
+  /// An individual mapping from virtual register number to SUnit.
+  struct VReg2SUnit {
+    unsigned VirtReg;
+    LaneBitmask LaneMask;
+    SUnit *SU;
+
+    VReg2SUnit(unsigned VReg, LaneBitmask LaneMask, SUnit *SU)
+      : VirtReg(VReg), LaneMask(LaneMask), SU(SU) {}
+
+    unsigned getSparseSetIndex() const {
+      return TargetRegisterInfo::virtReg2Index(VirtReg);
+    }
+  };
+
+  /// Mapping from virtual register to SUnit including an operand index.
+  struct VReg2SUnitOperIdx : public VReg2SUnit {
+    unsigned OperandIndex;
+
+    VReg2SUnitOperIdx(unsigned VReg, LaneBitmask LaneMask,
+                      unsigned OperandIndex, SUnit *SU)
+      : VReg2SUnit(VReg, LaneMask, SU), OperandIndex(OperandIndex) {}
+  };
+
+  /// Record a physical register access.
+  /// For non-data-dependent uses, OpIdx == -1.
+  struct PhysRegSUOper {
+    SUnit *SU;
+    int OpIdx;
+    unsigned Reg;
+
+    PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
+
+    unsigned getSparseSetIndex() const { return Reg; }
+  };
+
+  /// Use a SparseMultiSet to track physical registers. Storage is only
+  /// allocated once for the pass. It can be cleared in constant time and reused
+  /// without any frees.
+  using Reg2SUnitsMap =
+      SparseMultiSet<PhysRegSUOper, identity<unsigned>, uint16_t>;
+
+  /// Use SparseSet as a SparseMap by relying on the fact that it never
+  /// compares ValueT's, only unsigned keys. This allows the set to be cleared
+  /// between scheduling regions in constant time as long as ValueT does not
+  /// require a destructor.
+  using VReg2SUnitMap = SparseSet<VReg2SUnit, VirtReg2IndexFunctor>;
+
+  /// Track local uses of virtual registers. These uses are gathered by the DAG
+  /// builder and may be consulted by the scheduler to avoid iterating an entire
+  /// vreg use list.
+  using VReg2SUnitMultiMap = SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor>;
+
+  using VReg2SUnitOperIdxMultiMap =
+      SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>;
+
+  using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;
+
+  struct UnderlyingObject : PointerIntPair<ValueType, 1, bool> {
+    UnderlyingObject(ValueType V, bool MayAlias)
+        : PointerIntPair<ValueType, 1, bool>(V, MayAlias) {}
+
+    ValueType getValue() const { return getPointer(); }
+    bool mayAlias() const { return getInt(); }
+  };
+
+  using UnderlyingObjectsVector = SmallVector<UnderlyingObject, 4>;
+
+  /// A ScheduleDAG for scheduling lists of MachineInstr.
+  class ScheduleDAGInstrs : public ScheduleDAG {
+  protected:
+    const MachineLoopInfo *MLI;
+    const MachineFrameInfo &MFI;
+
+    /// TargetSchedModel provides an interface to the machine model.
+    TargetSchedModel SchedModel;
+
+    /// True if the DAG builder should remove kill flags (in preparation for
+    /// rescheduling).
+    bool RemoveKillFlags;
+
+    /// The standard DAG builder does not normally include terminators as DAG
+    /// nodes because it does not create the necessary dependencies to prevent
+    /// reordering. A specialized scheduler can override
+    /// TargetInstrInfo::isSchedulingBoundary and then enable this flag to
+    /// indicate it has taken responsibility for scheduling the terminator
+    /// correctly.
+    bool CanHandleTerminators = false;
+
+    /// Whether lane masks should get tracked.
+    bool TrackLaneMasks = false;
+
+    // State specific to the current scheduling region.
+    // ------------------------------------------------
+
+    /// The block in which to insert instructions.
+    MachineBasicBlock *BB;
+
+    /// The beginning of the range to be scheduled.
+    MachineBasicBlock::iterator RegionBegin;
+
+    /// The end of the range to be scheduled.
+    MachineBasicBlock::iterator RegionEnd;
+
+    /// Instructions in this region (distance(RegionBegin, RegionEnd)).
+    unsigned NumRegionInstrs;
+
+    /// After calling BuildSchedGraph, each machine instruction in the current
+    /// scheduling region is mapped to an SUnit.
+    DenseMap<MachineInstr*, SUnit*> MISUnitMap;
+
+    // State internal to DAG building.
+    // -------------------------------
+
+    /// Defs, Uses - Remember where defs and uses of each register are as we
+    /// iterate upward through the instructions. This is allocated here instead
+    /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+    /// destructed for each block.
+    Reg2SUnitsMap Defs;
+    Reg2SUnitsMap Uses;
+
+    /// Tracks the last instruction(s) in this region defining each virtual
+    /// register. There may be multiple current definitions for a register with
+    /// disjoint lane masks.
+    VReg2SUnitMultiMap CurrentVRegDefs;
+    /// Tracks the last instructions in this region using each virtual register.
+    VReg2SUnitOperIdxMultiMap CurrentVRegUses;
+
+    AliasAnalysis *AAForDep = nullptr;
+
+    /// Remember a generic side-effecting instruction as we proceed.
+    /// No other SU ever gets scheduled around it (except in the special
+    /// case of a huge region that gets reduced).
+    SUnit *BarrierChain = nullptr;
+
+  public:
+    /// A list of SUnits, used in Value2SUsMap, during DAG construction.
+    /// Note: to gain speed it might be worth investigating an optimized
+    /// implementation of this data structure, such as a singly linked list
+    /// with a memory pool (SmallVector was tried but slow and SparseSet is not
+    /// applicable).
+    using SUList = std::list<SUnit *>;
+
+  protected:
+    /// \brief A map from ValueType to SUList, used during DAG construction, as
+    /// a means of remembering which SUs depend on which memory locations.
+    class Value2SUsMap;
+
+    /// Reduces maps in FIFO order by N SUs. This is better than turning
+    /// every Nth memory SU into a BarrierChain in buildSchedGraph(), since it
+    /// avoids unnecessary edges between SUs seen above the new BarrierChain
+    /// and those below it.
+    void reduceHugeMemNodeMaps(Value2SUsMap &stores,
+                               Value2SUsMap &loads, unsigned N);
+
+    /// \brief Adds a chain edge between SUa and SUb, but only if both
+    /// AliasAnalysis and Target fail to deny the dependency.
+    void addChainDependency(SUnit *SUa, SUnit *SUb,
+                            unsigned Latency = 0);
+
+    /// Adds dependencies as needed from all SUs in list to SU.
+    void addChainDependencies(SUnit *SU, SUList &SUs, unsigned Latency) {
+      for (SUnit *Entry : SUs)
+        addChainDependency(SU, Entry, Latency);
+    }
+
+    /// Adds dependencies as needed from all SUs in map, to SU.
+    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap);
+
+    /// Adds dependencies as needed to SU, from all SUs mapped to V.
+    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap,
+                              ValueType V);
+
+    /// Adds barrier chain edges from all SUs in map, and then clears the map.
+    /// This is equivalent to insertBarrierChain(), but optimized for the common
+    /// case where the new BarrierChain (a global memory object) has a higher
+    /// NodeNum than all SUs in map. It is assumed BarrierChain has been set
+    /// before calling this.
+    void addBarrierChain(Value2SUsMap &map);
+
+    /// Inserts a barrier chain in a huge region, far below current SU.
+    /// Adds barrier chain edges from all SUs in map with higher NodeNums than
+    /// this new BarrierChain, and removes them from the map. It is assumed
+    /// BarrierChain has been set before calling this.
+    void insertBarrierChain(Value2SUsMap &map);
+
+    /// This Value is used as the map key for any unanalyzable memory access.
+    UndefValue *UnknownValue;
+
+    using DbgValueVector =
+        std::vector<std::pair<MachineInstr *, MachineInstr *>>;
+    /// Remembers the instruction that precedes each DBG_VALUE.
+    /// These are generated by buildSchedGraph but persist so they can be
+    /// referenced when emitting the final schedule.
+    DbgValueVector DbgValues;
+    MachineInstr *FirstDbgValue = nullptr;
+
+    /// Set of live physical registers for updating kill flags.
+    LivePhysRegs LiveRegs;
+
+  public:
+    explicit ScheduleDAGInstrs(MachineFunction &mf,
+                               const MachineLoopInfo *mli,
+                               bool RemoveKillFlags = false);
+
+    ~ScheduleDAGInstrs() override = default;
+
+    /// Gets the machine model for instruction scheduling.
+    const TargetSchedModel *getSchedModel() const { return &SchedModel; }
+
+    /// Resolves and caches the scheduling class for an SUnit.
+    const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
+      if (!SU->SchedClass && SchedModel.hasInstrSchedModel())
+        SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
+      return SU->SchedClass;
+    }
+
+    /// Returns an iterator to the top of the current scheduling region.
+    MachineBasicBlock::iterator begin() const { return RegionBegin; }
+
+    /// Returns an iterator to the bottom of the current scheduling region.
+    MachineBasicBlock::iterator end() const { return RegionEnd; }
+
+    /// Creates a new SUnit and returns a pointer to it.
+    SUnit *newSUnit(MachineInstr *MI);
+
+    /// Returns an existing SUnit for this MI, or nullptr.
+    SUnit *getSUnit(MachineInstr *MI) const;
+
+    /// If this method returns true, handling of the scheduling regions
+    /// themselves (in case of a scheduling boundary in MBB) will be done
+    /// beginning with the topmost region of MBB.
+    virtual bool doMBBSchedRegionsTopDown() const { return false; }
+
+    /// Prepares to perform scheduling in the given block.
+    virtual void startBlock(MachineBasicBlock *BB);
+
+    /// Cleans up after scheduling in the given block.
+    virtual void finishBlock();
+
+    /// \brief Initialize the DAG and common scheduler state for a new
+    /// scheduling region. This does not actually create the DAG, only clears
+    /// it. The scheduling driver may call BuildSchedGraph multiple times per
+    /// scheduling region.
+    virtual void enterRegion(MachineBasicBlock *bb,
+                             MachineBasicBlock::iterator begin,
+                             MachineBasicBlock::iterator end,
+                             unsigned regioninstrs);
+
+    /// Called when the scheduler has finished scheduling the current region.
+    virtual void exitRegion();
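+
+    // A minimal driver sketch (hypothetical, not part of this header): a
+    // scheduling pass typically walks each region of a block like so:
+    //
+    //   Scheduler.startBlock(&MBB);
+    //   Scheduler.enterRegion(&MBB, RegionBegin, RegionEnd, NumRegionInstrs);
+    //   Scheduler.schedule();   // subclass builds the DAG and orders nodes
+    //   Scheduler.exitRegion();
+    //   Scheduler.finishBlock();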
+
+    /// Builds SUnits for the current region.
+    /// If \p RPTracker is non-null, compute register pressure as a side effect.
+    /// The DAG builder is an efficient place to do it because it already visits
+    /// operands.
+    void buildSchedGraph(AliasAnalysis *AA,
+                         RegPressureTracker *RPTracker = nullptr,
+                         PressureDiffs *PDiffs = nullptr,
+                         LiveIntervals *LIS = nullptr,
+                         bool TrackLaneMasks = false);
+
+    /// \brief Adds dependencies from instructions in the current list of
+    /// instructions being scheduled to the scheduling barrier. We want to make
+    /// sure instructions which define registers that are either used by the
+    /// terminator or are live-out are properly scheduled. This is especially
+    /// important when the definition latency of the return value(s) is too
+    /// high to be hidden by the branch, or when the live-out registers are
+    /// used by instructions in the fallthrough block.
+    void addSchedBarrierDeps();
+
+    /// Orders nodes according to selected style.
+    ///
+    /// Typically, a scheduling algorithm will implement schedule() without
+    /// overriding enterRegion() or exitRegion().
+    virtual void schedule() = 0;
+
+    /// Allow targets to perform final scheduling actions at the level of the
+    /// whole MachineFunction. By default does nothing.
+    virtual void finalizeSchedule() {}
+
+    void dumpNode(const SUnit *SU) const override;
+
+    /// Returns a label for a DAG node that points to an instruction.
+    std::string getGraphNodeLabel(const SUnit *SU) const override;
+
+    /// Returns a label for the region of code covered by the DAG.
+    std::string getDAGName() const override;
+
+    /// Fixes register kill flags that scheduling has made invalid.
+    void fixupKills(MachineBasicBlock &MBB);
+
+  protected:
+    void initSUnits();
+    void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
+    void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
+    void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
+
+    /// Initializes register live-range state for updating kills.
+    /// PostRA helper for rewriting kill flags.
+    void startBlockForKills(MachineBasicBlock *BB);
+
+    /// Toggles a register operand kill flag.
+    ///
+    /// Other adjustments may be made to the instruction if necessary.
+    void toggleKillFlag(MachineInstr &MI, MachineOperand &MO);
+
+    /// Returns a mask for which lanes get read/written by the given (register)
+    /// machine operand.
+    LaneBitmask getLaneMaskForMO(const MachineOperand &MO) const;
+  };
+
+  /// Creates a new SUnit and returns a pointer to it.
+  inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
+#ifndef NDEBUG
+    const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
+#endif
+    SUnits.emplace_back(MI, (unsigned)SUnits.size());
+    assert((Addr == nullptr || Addr == &SUnits[0]) &&
+           "SUnits std::vector reallocated on the fly!");
+    return &SUnits.back();
+  }
+
+  /// Returns an existing SUnit for this MI, or nullptr.
+  inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
+    DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
+    if (I == MISUnitMap.end())
+      return nullptr;
+    return I->second;
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h
new file mode 100644
index 0000000..5c23642
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDAGMutation.h
@@ -0,0 +1,34 @@
+//===- ScheduleDAGMutation.h - MachineInstr Scheduling ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAGMutation class, which represents
+// a target-specific mutation of the dependency graph for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
+#define LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
+
+namespace llvm {
+
+class ScheduleDAGInstrs;
+
+/// Mutate the DAG as a postpass after normal DAG building.
+class ScheduleDAGMutation {
+  virtual void anchor();
+
+public:
+  virtual ~ScheduleDAGMutation() = default;
+
+  virtual void apply(ScheduleDAGInstrs *DAG) = 0;
+};
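+
+// A minimal sketch (hypothetical, not part of this header) of how a target
+// might subclass ScheduleDAGMutation to add extra dependency edges once the
+// DAG has been built:
+//
+//   struct ClusterLoadsMutation : public ScheduleDAGMutation {
+//     void apply(ScheduleDAGInstrs *DAG) override {
+//       // Inspect DAG->SUnits and add artificial edges, e.g. to keep
+//       // pairable loads adjacent in the final schedule.
+//     }
+//   };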
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
new file mode 100644
index 0000000..d6a8c79
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleDFS.h
@@ -0,0 +1,194 @@
+//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of an ILP metric for machine level instruction scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
+#define LLVM_CODEGEN_SCHEDULEDFS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// \brief Represent the ILP of the subDAG rooted at a DAG node.
+///
+/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
+/// valid for all nodes regardless of their subtree membership.
+///
+/// When computed using bottom-up DFS, this metric assumes that the DAG is a
+/// forest of trees with roots at the bottom of the schedule branching upward.
+struct ILPValue {
+  unsigned InstrCount;
+  /// Length may either correspond to depth or height, depending on direction,
+  /// and cycles or nodes depending on context.
+  unsigned Length;
+
+  ILPValue(unsigned count, unsigned length):
+    InstrCount(count), Length(length) {}
+
+  // Order by the ILP metric's value.
+  bool operator<(ILPValue RHS) const {
+    return (uint64_t)InstrCount * RHS.Length
+      < (uint64_t)Length * RHS.InstrCount;
+  }
+  bool operator>(ILPValue RHS) const {
+    return RHS < *this;
+  }
+  bool operator<=(ILPValue RHS) const {
+    return (uint64_t)InstrCount * RHS.Length
+      <= (uint64_t)Length * RHS.InstrCount;
+  }
+  bool operator>=(ILPValue RHS) const {
+    return RHS <= *this;
+  }
+
+  void print(raw_ostream &OS) const;
+
+  void dump() const;
+};
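+
+// The comparisons above order the ratios InstrCount / Length without
+// dividing, by cross-multiplying in 64 bits (avoiding both rounding error
+// and overflow). A worked example (illustrative values): ILPValue(6, 2)
+// represents an ILP of 3.0 and ILPValue(5, 1) an ILP of 5.0, so
+// ILPValue(6, 2) < ILPValue(5, 1) holds because 6 * 1 < 2 * 5.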
+
+/// \brief Compute the values of each DAG node for various metrics during DFS.
+class SchedDFSResult {
+  friend class SchedDFSImpl;
+
+  static const unsigned InvalidSubtreeID = ~0u;
+
+  /// \brief Per-SUnit data computed during DFS for various metrics.
+  ///
+  /// A node's SubtreeID is set to itself when it is visited to indicate that it
+  /// is the root of a subtree. Later it is set to its parent to indicate an
+  /// interior node. Finally, it is set to a representative subtree ID during
+  /// finalization.
+  struct NodeData {
+    unsigned InstrCount = 0;
+    unsigned SubtreeID = InvalidSubtreeID;
+
+    NodeData() = default;
+  };
+
+  /// \brief Per-Subtree data computed during DFS.
+  struct TreeData {
+    unsigned ParentTreeID = InvalidSubtreeID;
+    unsigned SubInstrCount = 0;
+
+    TreeData() = default;
+  };
+
+  /// \brief Record a connection between subtrees and the connection level.
+  struct Connection {
+    unsigned TreeID;
+    unsigned Level;
+
+    Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
+  };
+
+  bool IsBottomUp;
+  unsigned SubtreeLimit;
+  /// DFS results for each SUnit in this DAG.
+  std::vector<NodeData> DFSNodeData;
+
+  // Per-tree data, indexed by tree ID.
+  SmallVector<TreeData, 16> DFSTreeData;
+
+  // For each subtree discovered during DFS, record its connections to other
+  // subtrees.
+  std::vector<SmallVector<Connection, 4>> SubtreeConnections;
+
+  /// Cache the current connection level of each subtree.
+  /// This mutable array is updated during scheduling.
+  std::vector<unsigned> SubtreeConnectLevels;
+
+public:
+  SchedDFSResult(bool IsBU, unsigned lim)
+    : IsBottomUp(IsBU), SubtreeLimit(lim) {}
+
+  /// \brief Get the node cutoff before subtrees are considered significant.
+  unsigned getSubtreeLimit() const { return SubtreeLimit; }
+
+  /// \brief Return true if this DFSResult is uninitialized.
+  ///
+  /// resize() initializes DFSResult, while compute() populates it.
+  bool empty() const { return DFSNodeData.empty(); }
+
+  /// \brief Clear the results.
+  void clear() {
+    DFSNodeData.clear();
+    DFSTreeData.clear();
+    SubtreeConnections.clear();
+    SubtreeConnectLevels.clear();
+  }
+
+  /// \brief Initialize the result data with the size of the DAG.
+  void resize(unsigned NumSUnits) {
+    DFSNodeData.resize(NumSUnits);
+  }
+
+  /// \brief Compute various metrics for the DAG with given roots.
+  void compute(ArrayRef<SUnit> SUnits);
+
+  /// \brief Get the number of instructions in the given subtree and its
+  /// children.
+  unsigned getNumInstrs(const SUnit *SU) const {
+    return DFSNodeData[SU->NodeNum].InstrCount;
+  }
+
+  /// \brief Get the number of instructions in the given subtree not including
+  /// children.
+  unsigned getNumSubInstrs(unsigned SubtreeID) const {
+    return DFSTreeData[SubtreeID].SubInstrCount;
+  }
+
+  /// \brief Get the ILP value for a DAG node.
+  ///
+  /// A leaf node has an ILP of 1/1.
+  ILPValue getILP(const SUnit *SU) const {
+    return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
+  }
+
+  /// \brief The number of subtrees detected in this DAG.
+  unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
+
+  /// \brief Get the ID of the subtree the given DAG node belongs to.
+  ///
+  /// For convenience, if DFSResults have not been computed yet, give everything
+  /// tree ID 0.
+  unsigned getSubtreeID(const SUnit *SU) const {
+    if (empty())
+      return 0;
+    assert(SU->NodeNum < DFSNodeData.size() &&  "New Node");
+    return DFSNodeData[SU->NodeNum].SubtreeID;
+  }
+
+  /// \brief Get the connection level of a subtree.
+  ///
+  /// For bottom-up trees, the connection level is the latency depth (in cycles)
+  /// of the deepest connection to another subtree.
+  unsigned getSubtreeLevel(unsigned SubtreeID) const {
+    return SubtreeConnectLevels[SubtreeID];
+  }
+
+  /// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
+  /// initially scheduled.
+  void scheduleTree(unsigned SubtreeID);
+};
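+
+// A minimal usage sketch (hypothetical, not from this header): a scheduler
+// computes the DFS result once per region, then queries it per node:
+//
+//   SchedDFSResult DFS(/*IsBU=*/true, /*lim=*/8);
+//   DFS.resize(SUnits.size());
+//   DFS.compute(SUnits);
+//   ILPValue ILP = DFS.getILP(&SUnits[0]);
+//   unsigned TreeID = DFS.getSubtreeID(&SUnits[0]);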
+
+raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEDFS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h
new file mode 100644
index 0000000..ace4a2d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -0,0 +1,122 @@
+//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleHazardRecognizer class, which implements
+// hazard-avoidance heuristics for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+
+namespace llvm {
+
+class MachineInstr;
+class SUnit;
+
+/// HazardRecognizer - This determines whether or not an instruction can be
+/// issued this cycle, and whether or not a noop needs to be inserted to handle
+/// the hazard.
+class ScheduleHazardRecognizer {
+protected:
+  /// MaxLookAhead - Indicates the number of cycles in the scoreboard
+  /// state. Important to restore the state after backtracking. Additionally,
+  /// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
+  /// bypass virtual calls. Currently the PostRA scheduler ignores it.
+  unsigned MaxLookAhead = 0;
+
+public:
+  ScheduleHazardRecognizer() = default;
+  virtual ~ScheduleHazardRecognizer();
+
+  enum HazardType {
+    NoHazard,      // This instruction can be emitted at this cycle.
+    Hazard,        // This instruction can't be emitted at this cycle.
+    NoopHazard     // This instruction can't be emitted, and needs noops.
+  };
+
+  unsigned getMaxLookAhead() const { return MaxLookAhead; }
+
+  bool isEnabled() const { return MaxLookAhead != 0; }
+
+  /// atIssueLimit - Return true if no more instructions may be issued in this
+  /// cycle.
+  ///
+  /// FIXME: remove this once MachineScheduler is the only client.
+  virtual bool atIssueLimit() const { return false; }
+
+  /// getHazardType - Return the hazard type of emitting this node.  There are
+  /// three possible results.  Either:
+  ///  * NoHazard: it is legal to issue this instruction on this cycle.
+  ///  * Hazard: issuing this instruction would stall the machine.  If some
+  ///     other instruction is available, issue it first.
+  ///  * NoopHazard: issuing this instruction would break the program.  If
+  ///     some other instruction can be issued, do so, otherwise issue a noop.
+  virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
+    return NoHazard;
+  }
+
+  /// Reset - This callback is invoked when a new block of
+  /// instructions is about to be scheduled. The hazard state should be
+  /// set to an initialized state.
+  virtual void Reset() {}
+
+  /// EmitInstruction - This callback is invoked when an instruction is
+  /// emitted, to advance the hazard state.
+  virtual void EmitInstruction(SUnit *) {}
+
+  /// This overload will be used when the hazard recognizer is being used
+  /// by a non-scheduling pass, which does not use SUnits.
+  virtual void EmitInstruction(MachineInstr *) {}
+
+  /// PreEmitNoops - This callback is invoked prior to emitting an instruction.
+  /// It should return the number of noops to emit prior to the provided
+  /// instruction.
+  /// Note: This is only used during PostRA scheduling. EmitNoop is not called
+  /// for these noops.
+  virtual unsigned PreEmitNoops(SUnit *) {
+    return 0;
+  }
+
+  /// This overload will be used when the hazard recognizer is being used
+  /// by a non-scheduling pass, which does not use SUnits.
+  virtual unsigned PreEmitNoops(MachineInstr *) {
+    return 0;
+  }
+
+  /// ShouldPreferAnother - This callback may be invoked if getHazardType
+  /// returns NoHazard. If, even though there is no hazard, it would be better to
+  /// schedule another available instruction, this callback should return true.
+  virtual bool ShouldPreferAnother(SUnit *) {
+    return false;
+  }
+
+  /// AdvanceCycle - This callback is invoked whenever the next top-down
+  /// instruction to be scheduled cannot issue in the current cycle, either
+  /// because of latency or resource conflicts.  This should increment the
+  /// internal state of the hazard recognizer so that previously "Hazard"
+  /// instructions will now not be hazards.
+  virtual void AdvanceCycle() {}
+
+  /// RecedeCycle - This callback is invoked whenever the next bottom-up
+  /// instruction to be scheduled cannot issue in the current cycle, either
+  /// because of latency or resource conflicts.
+  virtual void RecedeCycle() {}
+
+  /// EmitNoop - This callback is invoked when a noop was added to the
+  /// instruction stream.
+  virtual void EmitNoop() {
+    // Default implementation: count it as a cycle.
+    AdvanceCycle();
+  }
+};
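+
+// A minimal sketch (hypothetical; HazardRec and pickNextCandidate are
+// illustrative names, not real APIs) of the issue loop a top-down scheduler
+// might run against this interface:
+//
+//   while (SUnit *SU = pickNextCandidate()) {
+//     switch (HazardRec->getHazardType(SU)) {
+//     case ScheduleHazardRecognizer::NoHazard:
+//       HazardRec->EmitInstruction(SU);  // issue in the current cycle
+//       break;
+//     case ScheduleHazardRecognizer::Hazard:
+//       HazardRec->AdvanceCycle();       // stall; retry next cycle
+//       break;
+//     case ScheduleHazardRecognizer::NoopHazard:
+//       HazardRec->EmitNoop();           // pad with a noop first
+//       break;
+//     }
+//   }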
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h b/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h
new file mode 100644
index 0000000..badf927
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SchedulerRegistry.h
@@ -0,0 +1,105 @@
+//===- llvm/CodeGen/SchedulerRegistry.h -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the instruction scheduler function
+// pass registry (RegisterScheduler).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
+#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterScheduler class - Track the registration of instruction schedulers.
+///
+//===----------------------------------------------------------------------===//
+
+class ScheduleDAGSDNodes;
+class SelectionDAGISel;
+
+class RegisterScheduler : public MachinePassRegistryNode {
+public:
+  using FunctionPassCtor = ScheduleDAGSDNodes *(*)(SelectionDAGISel*,
+                                                   CodeGenOpt::Level);
+
+  static MachinePassRegistry Registry;
+
+  RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
+  : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
+  { Registry.Add(this); }
+  ~RegisterScheduler() { Registry.Remove(this); }
+
+
+  // Accessors.
+  RegisterScheduler *getNext() const {
+    return (RegisterScheduler *)MachinePassRegistryNode::getNext();
+  }
+
+  static RegisterScheduler *getList() {
+    return (RegisterScheduler *)Registry.getList();
+  }
+
+  static void setListener(MachinePassRegistryListener *L) {
+    Registry.setListener(L);
+  }
+};
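+
+// A hypothetical registration sketch (the scheduler body is a stand-in): a
+// static RegisterScheduler object adds a scheduler to the registry so it can
+// be selected by name, e.g. via -pre-RA-sched=my-sched.
+//
+//   static ScheduleDAGSDNodes *createMyScheduler(SelectionDAGISel *IS,
+//                                                CodeGenOpt::Level OptLevel) {
+//     return createBURRListDAGScheduler(IS, OptLevel); // stand-in body
+//   }
+//   static RegisterScheduler
+//     MySchedRegistration("my-sched", "My custom scheduler",
+//                         createMyScheduler);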
+
+/// createBURRListDAGScheduler - This creates a bottom up register usage
+/// reduction list scheduler.
+ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
+                                               CodeGenOpt::Level OptLevel);
+
+/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
+/// schedules nodes in source code order when possible.
+ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
+                                                 CodeGenOpt::Level OptLevel);
+
+/// createHybridListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that makes use of latency information to avoid stalls
+/// for long latency instructions in low register pressure mode. In high
+/// register pressure mode it schedules to reduce register pressure.
+ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
+                                                 CodeGenOpt::Level);
+
+/// createILPListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that tries to increase instruction level parallelism
+/// in low register pressure mode. In high register pressure mode it schedules
+/// to reduce register pressure.
+ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
+                                              CodeGenOpt::Level);
+
+/// createFastDAGScheduler - This creates a "fast" scheduler.
+///
+ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates a top
+/// down, DFA-driven list scheduler with a clustering heuristic to control
+/// register pressure.
+ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createDefaultScheduler - This creates an instruction scheduler appropriate
+/// for the target.
+ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
+                                           CodeGenOpt::Level OptLevel);
+
+/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
+/// linearizes the DAG using topological order.
+ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
+                                        CodeGenOpt::Level OptLevel);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCHEDULERREGISTRY_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
new file mode 100644
index 0000000..466ab53
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -0,0 +1,128 @@
+//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScoreboardHazardRecognizer class, which
+// encapsulates hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+
+namespace llvm {
+
+class InstrItineraryData;
+class ScheduleDAG;
+class SUnit;
+
+class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
+  // Scoreboard to track function unit usage. Scoreboard[0] is a
+  // mask of the FUs in use in the cycle currently being
+  // scheduled. Scoreboard[1] is a mask for the next cycle. The
+  // Scoreboard is used as a circular buffer with the current cycle
+  // indicated by Head.
+  //
+  // Scoreboard always counts cycles in forward execution order. If used by a
+  // bottom-up scheduler, then the scoreboard cycles are the inverse of the
+  // scheduler's cycles.
+  class Scoreboard {
+    unsigned *Data = nullptr;
+
+    // The maximum number of cycles monitored by the Scoreboard. This
+    // value is determined based on the target itineraries to ensure
+    // that all hazards can be tracked.
+    size_t Depth = 0;
+
+    // Index into the Scoreboard that represents the current cycle.
+    size_t Head = 0;
+
+  public:
+    Scoreboard() = default;
+
+    ~Scoreboard() {
+      delete[] Data;
+    }
+
+    size_t getDepth() const { return Depth; }
+
+    unsigned& operator[](size_t idx) const {
+      // Depth is expected to be a power-of-2.
+      assert(Depth && !(Depth & (Depth - 1)) &&
+             "Scoreboard was not initialized properly!");
+
+      return Data[(Head + idx) & (Depth-1)];
+    }
+
+    void reset(size_t d = 1) {
+      if (!Data) {
+        Depth = d;
+        Data = new unsigned[Depth];
+      }
+
+      memset(Data, 0, Depth * sizeof(Data[0]));
+      Head = 0;
+    }
+
+    void advance() {
+      Head = (Head + 1) & (Depth-1);
+    }
+
+    void recede() {
+      Head = (Head - 1) & (Depth-1);
+    }
+
+    // Print the scoreboard.
+    void dump() const;
+  };
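+
+  // Illustrative note (not from the original header): because Depth is kept a
+  // power of two, the circular-buffer arithmetic above reduces to masking.
+  // For example, with Depth == 8 and Head == 6, Scoreboard[3] refers to
+  // Data[(6 + 3) & 7] == Data[1], i.e. the cycle three steps past the
+  // current one.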
+
+  // Support for tracing ScoreboardHazardRecognizer as a component within
+  // another module.
+  const char *DebugType;
+
+  // Itinerary data for the target.
+  const InstrItineraryData *ItinData;
+
+  const ScheduleDAG *DAG;
+
+  /// IssueWidth - Max issue per cycle. 0=Unknown.
+  unsigned IssueWidth = 0;
+
+  /// IssueCount - Count instructions issued in this cycle.
+  unsigned IssueCount = 0;
+
+  Scoreboard ReservedScoreboard;
+  Scoreboard RequiredScoreboard;
+
+public:
+  ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+                             const ScheduleDAG *DAG,
+                             const char *ParentDebugType = "");
+
+  /// atIssueLimit - Return true if no more instructions may be issued in this
+  /// cycle.
+  bool atIssueLimit() const override;
+
+  // Stalls provides a cycle offset at which SU will be scheduled. It will be
+  // negative for bottom-up scheduling.
+  HazardType getHazardType(SUnit *SU, int Stalls) override;
+  void Reset() override;
+  void EmitInstruction(SUnit *SU) override;
+  void AdvanceCycle() override;
+  void RecedeCycle() override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
new file mode 100644
index 0000000..af43c9b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAG.h
@@ -0,0 +1,1611 @@
+//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAG class, and transitively defines the
+// SDNode class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAG_H
+#define LLVM_CODEGEN_SELECTIONDAG_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BlockAddress;
+class Constant;
+class ConstantFP;
+class ConstantInt;
+class DataLayout;
+struct fltSemantics;
+class GlobalValue;
+struct KnownBits;
+class LLVMContext;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MCSymbol;
+class OptimizationRemarkEmitter;
+class SDDbgValue;
+class SelectionDAG;
+class SelectionDAGTargetInfo;
+class TargetLibraryInfo;
+class TargetLowering;
+class TargetMachine;
+class TargetSubtargetInfo;
+class Value;
+
+class SDVTListNode : public FoldingSetNode {
+  friend struct FoldingSetTrait<SDVTListNode>;
+
+  /// A reference to an Interned FoldingSetNodeID for this node.
+  /// The Allocator in SelectionDAG holds the data.
+  /// SDVTList contains all types which are frequently accessed in SelectionDAG.
+  /// The size of this list is not expected to be big so it won't introduce
+  /// a memory penalty.
+  FoldingSetNodeIDRef FastID;
+  const EVT *VTs;
+  unsigned int NumVTs;
+  /// The hash value for SDVTList is fixed, so cache it to avoid
+  /// hash calculation.
+  unsigned HashValue;
+
+public:
+  SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
+      FastID(ID), VTs(VT), NumVTs(Num) {
+    HashValue = ID.ComputeHash();
+  }
+
+  SDVTList getSDVTList() {
+    SDVTList result = {VTs, NumVTs};
+    return result;
+  }
+};
+
+/// Specialize FoldingSetTrait for SDVTListNode
+/// to avoid computing temp FoldingSetNodeID and hash value.
+template <>
+struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
+  static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
+    ID = X.FastID;
+  }
+
+  static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
+                     unsigned IDHash, FoldingSetNodeID &TempID) {
+    if (X.HashValue != IDHash)
+      return false;
+    return ID == X.FastID;
+  }
+
+  static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
+    return X.HashValue;
+  }
+};
+
+template <> struct ilist_alloc_traits<SDNode> {
+  static void deleteNode(SDNode *) {
+    llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
+  }
+};
+
+/// Keeps track of dbg_value information through SDISel.  We do
+/// not build SDNodes for these so as not to perturb the generated code;
+/// instead the info is kept off to the side in this structure. Each SDNode may
+/// have one or more associated dbg_value entries. This information is kept in
+/// DbgValMap.
+/// Byval parameters are handled separately because they don't use allocas,
+/// which busts the normal mechanism.  There is good reason for handling all
+/// parameters separately:  they may not have code generated for them, they
+/// should always go at the beginning of the function regardless of other code
+/// motion, and debug info for them is potentially useful even if the parameter
+/// is unused.  Right now only byval parameters are handled separately.
+class SDDbgInfo {
+  BumpPtrAllocator Alloc;
+  SmallVector<SDDbgValue*, 32> DbgValues;
+  SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
+  using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
+  DbgValMapType DbgValMap;
+
+public:
+  SDDbgInfo() = default;
+  SDDbgInfo(const SDDbgInfo &) = delete;
+  SDDbgInfo &operator=(const SDDbgInfo &) = delete;
+
+  void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
+    if (isParameter) {
+      ByvalParmDbgValues.push_back(V);
+    } else {
+      DbgValues.push_back(V);
+    }
+    if (Node)
+      DbgValMap[Node].push_back(V);
+  }
+
+  /// \brief Invalidate all DbgValues attached to the node and remove
+  /// it from the Node-to-DbgValues map.
+  void erase(const SDNode *Node);
+
+  void clear() {
+    DbgValMap.clear();
+    DbgValues.clear();
+    ByvalParmDbgValues.clear();
+    Alloc.Reset();
+  }
+
+  BumpPtrAllocator &getAlloc() { return Alloc; }
+
+  bool empty() const {
+    return DbgValues.empty() && ByvalParmDbgValues.empty();
+  }
+
+  ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
+    DbgValMapType::iterator I = DbgValMap.find(Node);
+    if (I != DbgValMap.end())
+      return I->second;
+    return ArrayRef<SDDbgValue*>();
+  }
+
+  using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
+
+  DbgIterator DbgBegin() { return DbgValues.begin(); }
+  DbgIterator DbgEnd()   { return DbgValues.end(); }
+  DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
+  DbgIterator ByvalParmDbgEnd()   { return ByvalParmDbgValues.end(); }
+};
+
+void checkForCycles(const SelectionDAG *DAG, bool force = false);
+
+/// This is used to represent a portion of an LLVM function in a low-level
+/// Data Dependence DAG representation suitable for instruction selection.
+/// This DAG is constructed as the first step of instruction selection in order
+/// to allow implementation of machine specific optimizations
+/// and code simplifications.
+///
+/// The representation used by the SelectionDAG is a target-independent
+/// representation, which has some similarities to the GCC RTL representation,
+/// but is significantly simpler and more powerful, and is a graph form
+/// instead of a linear form.
+///
+class SelectionDAG {
+  const TargetMachine &TM;
+  const SelectionDAGTargetInfo *TSI = nullptr;
+  const TargetLowering *TLI = nullptr;
+  const TargetLibraryInfo *LibInfo = nullptr;
+  MachineFunction *MF;
+  Pass *SDAGISelPass = nullptr;
+  LLVMContext *Context;
+  CodeGenOpt::Level OptLevel;
+
+  DivergenceAnalysis *DA = nullptr;
+  FunctionLoweringInfo *FLI = nullptr;
+
+  /// The function-level optimization remark emitter.  Used to emit remarks
+  /// whenever manipulating the DAG.
+  OptimizationRemarkEmitter *ORE;
+
+  /// The starting token.
+  SDNode EntryNode;
+
+  /// The root of the entire DAG.
+  SDValue Root;
+
+  /// A linked list of nodes in the current DAG.
+  ilist<SDNode> AllNodes;
+
+  /// The AllocatorType for allocating SDNodes. We use
+  /// pool allocation with recycling.
+  using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
+                                               sizeof(LargestSDNode),
+                                               alignof(MostAlignedSDNode)>;
+
+  /// Pool allocation for nodes.
+  NodeAllocatorType NodeAllocator;
+
+  /// This structure is used to memoize nodes, automatically performing
+  /// CSE with existing nodes when a duplicate is requested.
+  FoldingSet<SDNode> CSEMap;
+
+  /// Pool allocation for machine-opcode SDNode operands.
+  BumpPtrAllocator OperandAllocator;
+  ArrayRecycler<SDUse> OperandRecycler;
+
+  /// Pool allocation for misc. objects that are created once per SelectionDAG.
+  BumpPtrAllocator Allocator;
+
+  /// Tracks dbg_value information through SDISel.
+  SDDbgInfo *DbgInfo;
+
+  uint16_t NextPersistentId = 0;
+
+public:
+  /// Clients of various APIs that cause global effects on
+  /// the DAG can optionally implement this interface.  This allows the clients
+  /// to handle the various sorts of updates that happen.
+  ///
+  /// A DAGUpdateListener automatically registers itself with DAG when it is
+  /// constructed, and removes itself when destroyed in RAII fashion.
+  struct DAGUpdateListener {
+    DAGUpdateListener *const Next;
+    SelectionDAG &DAG;
+
+    explicit DAGUpdateListener(SelectionDAG &D)
+      : Next(D.UpdateListeners), DAG(D) {
+      DAG.UpdateListeners = this;
+    }
+
+    virtual ~DAGUpdateListener() {
+      assert(DAG.UpdateListeners == this &&
+             "DAGUpdateListeners must be destroyed in LIFO order");
+      DAG.UpdateListeners = Next;
+    }
+
+    /// The node N that was deleted and, if E is not null, an
+    /// equivalent node E that replaced it.
+    virtual void NodeDeleted(SDNode *N, SDNode *E);
+
+    /// The node N that was updated.
+    virtual void NodeUpdated(SDNode *N);
+  };
+
+  struct DAGNodeDeletedListener : public DAGUpdateListener {
+    std::function<void(SDNode *, SDNode *)> Callback;
+
+    DAGNodeDeletedListener(SelectionDAG &DAG,
+                           std::function<void(SDNode *, SDNode *)> Callback)
+        : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
+
+    void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
+  };
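+
+  // Illustrative usage (hypothetical; Replacements is an illustrative map):
+  // because listeners register and deregister via RAII, a client can observe
+  // deletions within a scope:
+  //
+  //   {
+  //     DAGNodeDeletedListener Watch(DAG, [&](SDNode *N, SDNode *E) {
+  //       Replacements[N] = E;  // remember what replaced each node
+  //     });
+  //     DAG.RemoveDeadNodes();  // deletions are reported to Watch
+  //   } // Watch deregisters itself here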
+
+  /// When true, additional steps are taken to
+  /// ensure that getConstant() and similar functions return DAG nodes that
+  /// have legal types. This is important after type legalization since
+  /// any illegally typed nodes generated after this point will not experience
+  /// type legalization.
+  bool NewNodesMustHaveLegalTypes = false;
+
+private:
+  /// DAGUpdateListener is a friend so it can manipulate the listener stack.
+  friend struct DAGUpdateListener;
+
+  /// Linked list of registered DAGUpdateListener instances.
+  /// This stack is maintained by DAGUpdateListener RAII.
+  DAGUpdateListener *UpdateListeners = nullptr;
+
+  /// Implementation of setSubgraphColor.
+  /// Return whether we had to truncate the search.
+  bool setSubgraphColorHelper(SDNode *N, const char *Color,
+                              DenseSet<SDNode *> &visited,
+                              int level, bool &printed);
+
+  template <typename SDNodeT, typename... ArgTypes>
+  SDNodeT *newSDNode(ArgTypes &&... Args) {
+    return new (NodeAllocator.template Allocate<SDNodeT>())
+        SDNodeT(std::forward<ArgTypes>(Args)...);
+  }
+
+  /// Build a synthetic SDNodeT with the given args and extract its subclass
+  /// data as an integer (e.g. for use in a folding set).
+  ///
+  /// The args to this function are the same as the args to SDNodeT's
+  /// constructor, except the second arg (assumed to be a const DebugLoc&) is
+  /// omitted.
+  template <typename SDNodeT, typename... ArgTypes>
+  static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
+                                               ArgTypes &&... Args) {
+    // The compiler can reduce this expression to a constant iff we pass an
+    // empty DebugLoc.  Thankfully, the debug location doesn't have any bearing
+    // on the subclass data.
+    return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
+        .getRawSubclassData();
+  }
+
+  template <typename SDNodeTy>
+  static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
+                                                SDVTList VTs, EVT MemoryVT,
+                                                MachineMemOperand *MMO) {
+    return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
+         .getRawSubclassData();
+  }
+
+  void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
+
+  void removeOperands(SDNode *Node) {
+    if (!Node->OperandList)
+      return;
+    OperandRecycler.deallocate(
+        ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
+        Node->OperandList);
+    Node->NumOperands = 0;
+    Node->OperandList = nullptr;
+  }
+  void CreateTopologicalOrder(std::vector<SDNode*>& Order);
+
+public:
+  explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
+  SelectionDAG(const SelectionDAG &) = delete;
+  SelectionDAG &operator=(const SelectionDAG &) = delete;
+  ~SelectionDAG();
+
+  /// Prepare this SelectionDAG to process code in the given MachineFunction.
+  void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
+            Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
+            DivergenceAnalysis *DA);
+
+  void setFunctionLoweringInfo(FunctionLoweringInfo *FuncInfo) {
+    FLI = FuncInfo;
+  }
+
+  /// Clear state and free memory necessary to make this
+  /// SelectionDAG ready to process a new block.
+  void clear();
+
+  MachineFunction &getMachineFunction() const { return *MF; }
+  const Pass *getPass() const { return SDAGISelPass; }
+
+  const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
+  const TargetMachine &getTarget() const { return TM; }
+  const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
+  const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
+  const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
+  const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
+  LLVMContext *getContext() const { return Context; }
+  OptimizationRemarkEmitter &getORE() const { return *ORE; }
+
+  /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
+  void viewGraph(const std::string &Title);
+  void viewGraph();
+
+#ifndef NDEBUG
+  std::map<const SDNode *, std::string> NodeGraphAttrs;
+#endif
+
+  /// Clear all previously defined node graph attributes.
+  /// Intended to be used from a debugging tool (eg. gdb).
+  void clearGraphAttrs();
+
+  /// Set graph attributes for a node. (eg. "color=red".)
+  void setGraphAttrs(const SDNode *N, const char *Attrs);
+
+  /// Get graph attributes for a node. (eg. "color=red".)
+  /// Used from getNodeAttributes.
+  const std::string getGraphAttrs(const SDNode *N) const;
+
+  /// Convenience for setting node color attribute.
+  void setGraphColor(const SDNode *N, const char *Color);
+
+  /// Convenience for setting subgraph color attribute.
+  void setSubgraphColor(SDNode *N, const char *Color);
+
+  using allnodes_const_iterator = ilist<SDNode>::const_iterator;
+
+  allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
+  allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
+
+  using allnodes_iterator = ilist<SDNode>::iterator;
+
+  allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
+  allnodes_iterator allnodes_end() { return AllNodes.end(); }
+
+  ilist<SDNode>::size_type allnodes_size() const {
+    return AllNodes.size();
+  }
+
+  iterator_range<allnodes_iterator> allnodes() {
+    return make_range(allnodes_begin(), allnodes_end());
+  }
+  iterator_range<allnodes_const_iterator> allnodes() const {
+    return make_range(allnodes_begin(), allnodes_end());
+  }
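+
+  // Illustrative usage (hypothetical counter): the accessors above allow
+  // range-based iteration over every node in the DAG, e.g.
+  //
+  //   for (SDNode &N : DAG.allnodes())
+  //     if (N.getOpcode() == ISD::ADD)
+  //       ++NumAdds;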
+
+  /// Return the root tag of the SelectionDAG.
+  const SDValue &getRoot() const { return Root; }
+
+  /// Return the token chain corresponding to the entry of the function.
+  SDValue getEntryNode() const {
+    return SDValue(const_cast<SDNode *>(&EntryNode), 0);
+  }
+
+  /// Set the current root tag of the SelectionDAG.
+  ///
+  const SDValue &setRoot(SDValue N) {
+    assert((!N.getNode() || N.getValueType() == MVT::Other) &&
+           "DAG root value is not a chain!");
+    if (N.getNode())
+      checkForCycles(N.getNode(), this);
+    Root = N;
+    if (N.getNode())
+      checkForCycles(this);
+    return Root;
+  }
+
+  void VerifyDAGDiverence();
+
+  /// This iterates over the nodes in the SelectionDAG, folding
+  /// certain types of nodes together, or eliminating superfluous nodes.  The
+  /// Level argument controls whether Combine is allowed to produce nodes and
+  /// types that are illegal on the target.
+  void Combine(CombineLevel Level, AliasAnalysis *AA,
+               CodeGenOpt::Level OptLevel);
+
+  /// This transforms the SelectionDAG into a SelectionDAG that
+  /// only uses types natively supported by the target.
+  /// Returns "true" if it made any changes.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  bool LegalizeTypes();
+
+  /// This transforms the SelectionDAG into a SelectionDAG that is
+  /// compatible with the target instruction selector, as indicated by the
+  /// TargetLowering object.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  void Legalize();
+
+  /// \brief Transforms a SelectionDAG node and any operands to it into a node
+  /// that is compatible with the target instruction selector, as indicated by
+  /// the TargetLowering object.
+  ///
+  /// \returns true if \c N is a valid, legal node after calling this.
+  ///
+  /// This essentially runs a single recursive walk of the \c Legalize process
+  /// over the given node (and its operands). This can be used to incrementally
+  /// legalize the DAG. All of the nodes which are directly replaced,
+  /// potentially including N, are added to the output parameter \c
+  /// UpdatedNodes so that the delta to the DAG can be understood by the
+  /// caller.
+  ///
+  /// When this returns false, N has been legalized in a way that makes the
+  /// pointer passed in no longer valid. It may have even been deleted from the
+  /// DAG, and so it shouldn't be used further. When this returns true, the
+  /// N passed in is a legal node, and can be immediately processed as such.
+  /// This may still have done some work on the DAG, and will still populate
+  /// UpdatedNodes with any new nodes replacing those originally in the DAG.
+  bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
+
+  /// This transforms the SelectionDAG into a SelectionDAG
+  /// that only uses vector math operations supported by the target.  This is
+  /// necessary as a separate step from Legalize because unrolling a vector
+  /// operation can introduce illegal types, which requires running
+  /// LegalizeTypes again.
+  ///
+  /// This returns true if it made any changes; in that case, LegalizeTypes
+  /// is called again before Legalize.
+  ///
+  /// Note that this is an involved process that may invalidate pointers into
+  /// the graph.
+  bool LegalizeVectors();
+
+  /// This method deletes all unreachable nodes in the SelectionDAG.
+  void RemoveDeadNodes();
+
+  /// Remove the specified node from the system.  This node must
+  /// have no referrers.
+  void DeleteNode(SDNode *N);
+
+  /// Return an SDVTList that represents the list of values specified.
+  SDVTList getVTList(EVT VT);
+  SDVTList getVTList(EVT VT1, EVT VT2);
+  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
+  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
+  SDVTList getVTList(ArrayRef<EVT> VTs);
+
+  //===--------------------------------------------------------------------===//
+  // Node creation methods.
+
+  /// \brief Create a ConstantSDNode wrapping a constant value.
+  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
+  ///
+  /// If only legal types can be produced, this does the necessary
+  /// transformations (e.g., if the vector element type is illegal).
+  /// @{
+  SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+  SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+
+  SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
+                             bool IsOpaque = false) {
+    return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
+                       VT, IsTarget, IsOpaque);
+  }
+
+  SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
+                      bool isTarget = false, bool isOpaque = false);
+  SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
+                            bool isTarget = false);
+  SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+  SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+  SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
+                            bool isOpaque = false) {
+    return getConstant(Val, DL, VT, true, isOpaque);
+  }
+
+  /// \brief Create a true or false constant of type \p VT using the target's
+  /// BooleanContent for type \p OpVT.
+  SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
+  /// @}
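+
+  // Illustrative usage (hypothetical values): creating a scalar i32 constant
+  // and a v4i32 constant, where the vector form is splatted into a
+  // BUILD_VECTOR as described above:
+  //
+  //   SDValue C  = DAG.getConstant(42, DL, MVT::i32);
+  //   SDValue CV = DAG.getConstant(42, DL, MVT::v4i32); // splat of 42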
+
+  /// \brief Create a ConstantFPSDNode wrapping a constant value.
+  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
+  ///
+  /// If only legal types can be produced, this does the necessary
+  /// transformations (e.g., if the vector element type is illegal).
+  /// The forms that take a double should only be used for simple constants
+  /// that can be exactly represented in VT.  No checks are made.
+  /// @{
+  SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getConstantFP(const ConstantFP &CF, const SDLoc &DL, EVT VT,
+                        bool isTarget = false);
+  SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
+    return getConstantFP(Val, DL, VT, true);
+  }
+  /// @}
+
+  SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
+                           int64_t offset = 0, bool isTargetGA = false,
+                           unsigned char TargetFlags = 0);
+  SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
+                                 int64_t offset = 0,
+                                 unsigned char TargetFlags = 0) {
+    return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
+  }
+  SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
+  SDValue getTargetFrameIndex(int FI, EVT VT) {
+    return getFrameIndex(FI, VT, true);
+  }
+  SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
+                       unsigned char TargetFlags = 0);
+  SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
+    return getJumpTable(JTI, VT, true, TargetFlags);
+  }
+  SDValue getConstantPool(const Constant *C, EVT VT,
+                          unsigned Align = 0, int Offs = 0, bool isT=false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetConstantPool(const Constant *C, EVT VT,
+                                unsigned Align = 0, int Offset = 0,
+                                unsigned char TargetFlags = 0) {
+    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+  }
+  SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
+                          unsigned Align = 0, int Offs = 0, bool isT=false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetConstantPool(MachineConstantPoolValue *C,
+                                  EVT VT, unsigned Align = 0,
+                                  int Offset = 0, unsigned char TargetFlags=0) {
+    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+  }
+  SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
+                         unsigned char TargetFlags = 0);
+  // When generating a branch to a BB, we don't in general know enough
+  // to provide debug info for the BB at that time, so keep this one around.
+  SDValue getBasicBlock(MachineBasicBlock *MBB);
+  SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
+  SDValue getExternalSymbol(const char *Sym, EVT VT);
+  SDValue getExternalSymbol(const char *Sym, const SDLoc &dl, EVT VT);
+  SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
+                                  unsigned char TargetFlags = 0);
+  SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
+
+  SDValue getValueType(EVT);
+  SDValue getRegister(unsigned Reg, EVT VT);
+  SDValue getRegisterMask(const uint32_t *RegMask);
+  SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
+  SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
+                       MCSymbol *Label);
+  SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
+                          int64_t Offset = 0, bool isTarget = false,
+                          unsigned char TargetFlags = 0);
+  SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
+                                int64_t Offset = 0,
+                                unsigned char TargetFlags = 0) {
+    return getBlockAddress(BA, VT, Offset, true, TargetFlags);
+  }
+
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
+                       SDValue N) {
+    return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
+                   getRegister(Reg, N.getValueType()), N);
+  }
+
+  // This version of the getCopyToReg method takes an extra operand, which
+  // indicates that there is potentially an incoming glue value (if Glue is not
+  // null) and that there should be a glue result.
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
+                       SDValue Glue) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
+    return getNode(ISD::CopyToReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+  }
+
+  // Similar to last getCopyToReg() except parameter Reg is a SDValue
+  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
+                       SDValue Glue) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, Reg, N, Glue };
+    return getNode(ISD::CopyToReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+  }
+
+  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
+    SDVTList VTs = getVTList(VT, MVT::Other);
+    SDValue Ops[] = { Chain, getRegister(Reg, VT) };
+    return getNode(ISD::CopyFromReg, dl, VTs, Ops);
+  }
+
+  // This version of the getCopyFromReg method takes an extra operand, which
+  // indicates that there is potentially an incoming glue value (if Glue is not
+  // null) and that there should be a glue result.
+  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
+                         SDValue Glue) {
+    SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
+    return getNode(ISD::CopyFromReg, dl, VTs,
+                   makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
+  }
+
+  SDValue getCondCode(ISD::CondCode Cond);
+
+  /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
+  /// which must be a vector type, must match the number of mask elements
+  /// NumElts. An integer mask element equal to -1 is treated as undefined.
+  SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
+                           ArrayRef<int> Mask);
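+
+  // A minimal usage sketch (assumes a SelectionDAG &DAG, an SDLoc DL, and a
+  // v4i32 SDValue V in scope; names are illustrative):
+  //   int Mask[] = {3, 2, 1, 0}; // reverse the four lanes
+  //   SDValue Rev = DAG.getVectorShuffle(MVT::v4i32, DL, V,
+  //                                      DAG.getUNDEF(MVT::v4i32), Mask);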
+
+  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
+  /// which must be a vector type, must match the number of operands in Ops.
+  /// The operands must have the same type as (or, for integers, a type wider
+  /// than) VT's element type.
+  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
+
+  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
+  /// which must be a vector type, must match the number of operands in Ops.
+  /// The operands must have the same type as (or, for integers, a type wider
+  /// than) VT's element type.
+  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
+
+  /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
+  /// elements. VT must be a vector type. Op's type must be the same as (or,
+  /// for integers, a type wider than) VT's element type.
+  SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
+    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
+    if (Op.getOpcode() == ISD::UNDEF) {
+      assert((VT.getVectorElementType() == Op.getValueType() ||
+              (VT.isInteger() &&
+               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
+             "A splatted value must have a width equal or (for integers) "
+             "greater than the vector element type!");
+      return getNode(ISD::UNDEF, SDLoc(), VT);
+    }
+
+    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
+    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+  }
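+
+  // A minimal sketch of building a splat (assumes a SelectionDAG &DAG and an
+  // SDLoc DL in scope; names are illustrative):
+  //   SDValue C = DAG.getConstant(42, DL, MVT::i32);
+  //   SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, C); // <42,42,42,42>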
+
+  /// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
+  /// the shuffle node in input but with swapped operands.
+  ///
+  /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
+  SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
+
+  /// Convert Op, which must be of float type, to the
+  /// float type VT, by either extending or rounding (by truncation).
+  SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either any-extending or truncating it.
+  SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either sign-extending or truncating it.
+  SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the
+  /// integer type VT, by either zero-extending or truncating it.
+  SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return the expression required to zero extend the Op
+  /// value assuming it was the smaller SrcTy value.
+  SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT SrcTy);
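+
+  // A minimal sketch of the ext-or-trunc helpers (assumes a SelectionDAG &DAG,
+  // an SDLoc DL, and an integer SDValue V in scope): normalize V to i32,
+  // zero-extending or truncating as needed.
+  //   SDValue V32 = DAG.getZExtOrTrunc(V, DL, MVT::i32);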
+
+  /// Return an operation which will any-extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by any-extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return an operation which will sign extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by sign extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getSignExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Return an operation which will zero extend the low lanes of the operand
+  /// into the specified vector type. For example,
+  /// this can convert a v16i8 into a v4i32 by zero extending the low four
+  /// lanes of the operand from i8 to i32.
+  SDValue getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL, EVT VT);
+
+  /// Convert Op, which must be of integer type, to the integer type VT,
+  /// by using an extension appropriate for the target's
+  /// BooleanContent for type OpVT or truncating it.
+  SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
+
+  /// Create a bitwise NOT operation as (XOR Val, -1).
+  SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
+
+  /// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
+  SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
+
+  /// \brief Create an add instruction with appropriate flags when used for
+  /// addressing some offset of an object. i.e. if a load is split into multiple
+  /// components, create an add nuw from the base pointer to the offset.
+  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, int64_t Offset) {
+    EVT VT = Op.getValueType();
+    return getObjectPtrOffset(SL, Op, getConstant(Offset, SL, VT));
+  }
+
+  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Op, SDValue Offset) {
+    EVT VT = Op.getValueType();
+
+    // The object itself can't wrap around the address space, so it shouldn't be
+    // possible for the adds of the offsets to the split parts to overflow.
+    SDNodeFlags Flags;
+    Flags.setNoUnsignedWrap(true);
+    return getNode(ISD::ADD, SL, VT, Op, Offset, Flags);
+  }
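+
+  // A minimal sketch (assumes a SelectionDAG &DAG, an SDLoc DL, and a pointer
+  // SDValue BasePtr for a 64-bit object being split into two halves in scope):
+  // address the high half with an add that carries the nuw flag.
+  //   SDValue HiPtr = DAG.getObjectPtrOffset(DL, BasePtr, 4);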
+
+  /// Return a new CALLSEQ_START node, that starts new call frame, in which
+  /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
+  /// OutSize specifies part of the frame set up prior to the sequence.
+  SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
+                           const SDLoc &DL) {
+    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain,
+                      getIntPtrConstant(InSize, DL, true),
+                      getIntPtrConstant(OutSize, DL, true) };
+    return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
+  }
+
+  /// Return a new CALLSEQ_END node, which always must have a
+  /// glue result (to ensure it's not CSE'd).
+  /// CALLSEQ_END does not have a useful SDLoc.
+  SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
+                         SDValue InGlue, const SDLoc &DL) {
+    SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
+    SmallVector<SDValue, 4> Ops;
+    Ops.push_back(Chain);
+    Ops.push_back(Op1);
+    Ops.push_back(Op2);
+    if (InGlue.getNode())
+      Ops.push_back(InGlue);
+    return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
+  }
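+
+  // A minimal sketch of a call frame bracket (assumes a SelectionDAG &DAG, an
+  // SDLoc DL, a chain SDValue Chain, and a glue SDValue InGlue in scope;
+  // NumBytes is an illustrative outgoing-argument size):
+  //   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
+  //   // ... build and emit the call here ...
+  //   Chain = DAG.getCALLSEQ_END(Chain,
+  //                              DAG.getIntPtrConstant(NumBytes, DL, true),
+  //                              DAG.getIntPtrConstant(0, DL, true),
+  //                              InGlue, DL);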
+
+  /// Return true if the result of this operation is always undefined.
+  bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
+
+  /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
+  SDValue getUNDEF(EVT VT) {
+    return getNode(ISD::UNDEF, SDLoc(), VT);
+  }
+
+  /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
+  SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
+    return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
+  }
+
+  /// Gets or creates the specified node.
+  ///
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+                  ArrayRef<SDUse> Ops);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+                  ArrayRef<SDValue> Ops, const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
+                  ArrayRef<SDValue> Ops);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
+                  ArrayRef<SDValue> Ops);
+
+  // Specialize based on number of operands.
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N,
+                  const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, const SDNodeFlags Flags = SDNodeFlags());
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);
+
+  // Specialize again based on number of operands for nodes with a VTList
+  // rather than a single VT.
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4);
+  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs, SDValue N1,
+                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);
+
+  /// Compute a TokenFactor to force all the incoming stack arguments to be
+  /// loaded from the stack. This is used in tail call lowering to protect
+  /// stack arguments from being clobbered.
+  SDValue getStackArgumentTokenFactor(SDValue Chain);
+
+  SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                    SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
+                    bool isTailCall, MachinePointerInfo DstPtrInfo,
+                    MachinePointerInfo SrcPtrInfo);
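+
+  // A minimal sketch of lowering a fixed-size memcpy (assumes a SelectionDAG
+  // &DAG, an SDLoc DL, a chain SDValue Chain, and pointer SDValues Dst and Src
+  // in scope; DstPI and SrcPI are illustrative MachinePointerInfo values):
+  //   Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
+  //                         DAG.getConstant(16, DL, MVT::i32),
+  //                         /*Align=*/4, /*isVol=*/false, /*AlwaysInline=*/false,
+  //                         /*isTailCall=*/false, DstPI, SrcPI);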
+
+  SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                     SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+                     MachinePointerInfo DstPtrInfo,
+                     MachinePointerInfo SrcPtrInfo);
+
+  SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
+                    SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+                    MachinePointerInfo DstPtrInfo);
+
+  /// Helper function to make it easier to build SetCC's if you just
+  /// have an ISD::CondCode instead of an SDValue.
+  ///
+  SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
+                   ISD::CondCode Cond) {
+    assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
+      "Cannot compare scalars to vectors");
+    assert(LHS.getValueType().isVector() == VT.isVector() &&
+      "Cannot compare scalars to vectors");
+    assert(Cond != ISD::SETCC_INVALID &&
+        "Cannot create a setCC of an invalid node.");
+    return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+  }
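+
+  // A minimal sketch (assumes a SelectionDAG &DAG, an SDLoc DL, and two i32
+  // SDValues A and B in scope): an i1 signed less-than compare.
+  //   SDValue LT = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETLT);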
+
+  /// Helper function to make it easier to build Select's if you just have
+  /// operands and don't want to check for the vector case yourself.
+  SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
+                    SDValue RHS) {
+    assert(LHS.getValueType() == RHS.getValueType() &&
+           "Cannot use select on differing types");
+    assert(VT.isVector() == LHS.getValueType().isVector() &&
+           "Cannot mix vectors and scalars");
+    return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT,
+                   DL, VT, Cond, LHS, RHS);
+  }
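+
+  // A minimal sketch of a scalar max built from the two helpers above
+  // (assumes DAG, DL, A, and B as in the previous sketch):
+  //   SDValue GT  = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETGT);
+  //   SDValue Max = DAG.getSelect(DL, A.getValueType(), GT, A, B);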
+
+  /// Helper function to make it easier to build SelectCC's if you
+  /// just have an ISD::CondCode instead of an SDValue.
+  ///
+  SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
+                      SDValue False, ISD::CondCode Cond) {
+    return getNode(ISD::SELECT_CC, DL, True.getValueType(),
+                   LHS, RHS, True, False, getCondCode(Cond));
+  }
+
+  /// VAArg produces a result and token chain, and takes a pointer
+  /// and a source value as input.
+  SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                   SDValue SV, unsigned Align);
+
+  /// Gets a node for an atomic cmpxchg op. There are two
+  /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
+  /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
+  /// a success flag (initially i1), and a chain.
+  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                           SDVTList VTs, SDValue Chain, SDValue Ptr,
+                           SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
+                           unsigned Alignment, AtomicOrdering SuccessOrdering,
+                           AtomicOrdering FailureOrdering,
+                           SyncScope::ID SSID);
+  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                           SDVTList VTs, SDValue Chain, SDValue Ptr,
+                           SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces a result (if relevant) and a
+  /// chain, and takes 2 operands.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
+                    SDValue Ptr, SDValue Val, const Value *PtrVal,
+                    unsigned Alignment, AtomicOrdering Ordering,
+                    SyncScope::ID SSID);
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
+                    SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces a result and a chain, and
+  /// takes 1 operand.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
+                    SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
+
+  /// Gets a node for an atomic op, produces a result and a chain, and takes N
+  /// operands.
+  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
+                    SDVTList VTList, ArrayRef<SDValue> Ops,
+                    MachineMemOperand *MMO);
+
+  /// Creates a MemIntrinsicNode that may produce a
+  /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
+  /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
+  /// less than FIRST_TARGET_MEMORY_OPCODE.
+  SDValue getMemIntrinsicNode(
+      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
+      EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align = 0,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
+                                       MachineMemOperand::MOStore,
+      unsigned Size = 0);
+
+  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
+                              ArrayRef<SDValue> Ops, EVT MemVT,
+                              MachineMemOperand *MMO);
+
+  /// Create a MERGE_VALUES node from the given operands.
+  SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
+
+  /// Loads are not normal binary operators: their result type is not
+  /// determined by their operands, and they produce a value AND a token chain.
+  ///
+  /// This function will set the MOLoad flag on MMOFlags, but you can set it if
+  /// you want.  The MOStore flag must not be set.
+  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                  MachinePointerInfo PtrInfo, unsigned Alignment = 0,
+                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                  const AAMDNodes &AAInfo = AAMDNodes(),
+                  const MDNode *Ranges = nullptr);
+  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                  MachineMemOperand *MMO);
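+
+  // A minimal sketch of a plain chained load (assumes a SelectionDAG &DAG, an
+  // SDLoc DL, a MachineFunction &MF, a frame index FI, and its address SDValue
+  // FIPtr in scope; names are illustrative):
+  //   SDValue Ld = DAG.getLoad(MVT::i32, DL, DAG.getEntryNode(), FIPtr,
+  //                            MachinePointerInfo::getFixedStack(MF, FI));
+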
+  SDValue
+  getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
+             SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
+             unsigned Alignment = 0,
+             MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+             const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
+                     SDValue Chain, SDValue Ptr, EVT MemVT,
+                     MachineMemOperand *MMO);
+  SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
+                         SDValue Offset, ISD::MemIndexedMode AM);
+  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
+                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
+                  MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment = 0,
+                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                  const AAMDNodes &AAInfo = AAMDNodes(),
+                  const MDNode *Ranges = nullptr);
+  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
+                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
+                  EVT MemVT, MachineMemOperand *MMO);
+
+  /// Helper function to build ISD::STORE nodes.
+  ///
+  /// This function will set the MOStore flag on MMOFlags, but you can set it if
+  /// you want.  The MOLoad and MOInvariant flags must not be set.
+  SDValue
+  getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+           MachinePointerInfo PtrInfo, unsigned Alignment = 0,
+           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+           const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+                   MachineMemOperand *MMO);
+  SDValue
+  getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
+                MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment = 0,
+                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+                const AAMDNodes &AAInfo = AAMDNodes());
+  SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
+                        SDValue Ptr, EVT TVT, MachineMemOperand *MMO);
+  SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
+                          SDValue Offset, ISD::MemIndexedMode AM);
+
+  /// Returns the sum of the base pointer and offset.
+  SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, const SDLoc &DL);
+
+  SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
+                        SDValue Mask, SDValue Src0, EVT MemVT,
+                        MachineMemOperand *MMO, ISD::LoadExtType,
+                        bool IsExpanding = false);
+  SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
+                         SDValue Ptr, SDValue Mask, EVT MemVT,
+                         MachineMemOperand *MMO, bool IsTruncating = false,
+                         bool IsCompressing = false);
+  SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
+                          ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+  SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
+                           ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+
+  /// Return (create a new or find existing) a target-specific node.
+  /// TargetMemSDNode should be derived class from MemSDNode.
+  template <class TargetMemSDNode>
+  SDValue getTargetMemSDNode(SDVTList VTs, ArrayRef<SDValue> Ops,
+                             const SDLoc &dl, EVT MemVT,
+                             MachineMemOperand *MMO);
+
+  /// Construct a node to track a Value* through the backend.
+  SDValue getSrcValue(const Value *v);
+
+  /// Return an MDNodeSDNode which holds an MDNode.
+  SDValue getMDNode(const MDNode *MD);
+
+  /// Return a bitcast using the SDLoc of the value operand, and casting to the
+  /// provided type. Use getNode to set a custom SDLoc.
+  SDValue getBitcast(EVT VT, SDValue V);
+
+  /// Return an AddrSpaceCastSDNode.
+  SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
+                           unsigned DestAS);
+
+  /// Return the specified value casted to
+  /// the target's desired shift amount type.
+  SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
+
+  /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
+  SDValue expandVAArg(SDNode *Node);
+
+  /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
+  SDValue expandVACopy(SDNode *Node);
+
+  /// *Mutate* the specified node in-place to have the
+  /// specified operands.  If the resultant node already exists in the DAG,
+  /// this does not modify the specified node, instead it returns the node that
+  /// already exists.  If the resultant node does not exist in the DAG, the
+  /// input node is returned.  As a degenerate case, if you specify the same
+  /// input operands as the node already has, the input node is returned.
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3, SDValue Op4);
+  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+                             SDValue Op3, SDValue Op4, SDValue Op5);
+  SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
+
+  // Propagates the change in divergence to users.
+  void updateDivergence(SDNode *N);
+
+  /// These are used for target selectors to *mutate* the
+  /// specified node to have the specified return type, Target opcode, and
+  /// operands.  Note that target opcodes are stored as
+  /// ~TargetOpcode in the node opcode field.  The resultant node is returned.
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT, SDValue Op1);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       SDValue Op1, SDValue Op2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       SDValue Op1, SDValue Op2, SDValue Op3);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+                       ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, SDValue Op1);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+                       EVT VT2, SDValue Op1, SDValue Op2);
+  SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
+                       ArrayRef<SDValue> Ops);
+
+  /// This *mutates* the specified node to have the specified
+  /// return type, opcode, and operands.
+  SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
+                      ArrayRef<SDValue> Ops);
+
+  /// Mutate the specified strict FP node to its non-strict equivalent,
+  /// unlinking the node from its chain and dropping the metadata arguments.
+  /// The node must be a strict FP node.
+  SDNode *mutateStrictFPToFP(SDNode *Node);
+
+  /// These are used for target selectors to create a new node
+  /// with specified return type(s), MachineInstr opcode, and operands.
+  ///
+  /// Note that getMachineNode returns the resultant node.  If there is already
+  /// a node of the specified opcode and operands, it returns that node instead
+  /// of the current one.
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                SDValue Op1, SDValue Op2, SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
+                                ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
+                                SDValue Op3);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
+                                EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
+                                ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
+  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
+                                ArrayRef<SDValue> Ops);
+
+  /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
+  SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
+                                 SDValue Operand);
+
+  /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
+  SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
+                                SDValue Operand, SDValue Subreg);
+
+  /// Get the specified node if it's already available, or else return NULL.
+  SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
+                          const SDNodeFlags Flags = SDNodeFlags());
+
+  /// Creates a SDDbgValue node.
+  SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
+                          unsigned R, bool IsIndirect, const DebugLoc &DL,
+                          unsigned O);
+
+  /// Creates a constant SDDbgValue node.
+  SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
+                                  const Value *C, const DebugLoc &DL,
+                                  unsigned O);
+
+  /// Creates a FrameIndex SDDbgValue node.
+  SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
+                                    unsigned FI, const DebugLoc &DL,
+                                    unsigned O);
+
+  /// Transfer debug values from one node to another, while optionally
+  /// generating fragment expressions for split-up values. If \p InvalidateDbg
+  /// is set, debug values are invalidated after they are transferred.
+  void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
+                         unsigned SizeInBits = 0, bool InvalidateDbg = true);
+
+  /// Remove the specified node from the system. If any of its
+  /// operands then become dead, remove them as well. Inform UpdateListener
+  /// for each node deleted.
+  void RemoveDeadNode(SDNode *N);
+
+  /// This method deletes the unreachable nodes in the
+  /// given list, and any nodes that become unreachable as a result.
+  void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
+
+  /// Modify anything using 'From' to use 'To' instead.
+  /// This can cause recursive merging of nodes in the DAG.  Use the first
+  /// version if 'From' is known to have a single result, use the second
+  /// if you have two nodes with identical results (or if 'To' has a superset
+  /// of the results of 'From'), use the third otherwise.
+  ///
+  /// These methods all take an optional UpdateListener, which (if not null) is
+  /// informed about nodes that are deleted and modified due to recursive
+  /// changes in the dag.
+  ///
+  /// These functions only replace all existing uses. It's possible that as
+  /// these replacements are being performed, CSE may cause the From node
+  /// to be given new uses. These new uses of From are left in place, and
+  /// not automatically transferred to To.
+  ///
+  void ReplaceAllUsesWith(SDValue From, SDValue Op);
+  void ReplaceAllUsesWith(SDNode *From, SDNode *To);
+  void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
+
+  /// Replace any uses of From with To, leaving
+  /// uses of other values produced by From.getNode() alone.
+  void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
+
+  /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
+  /// This correctly handles the case where
+  /// there is an overlap between the From values and the To values.
+  void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
+                                  unsigned Num);
+
+  /// If an existing load has uses of its chain, create a token factor node with
+  /// that chain and the new memory node's chain and update users of the old
+  /// chain to the token factor. This ensures that the new memory node will have
+  /// the same relative memory dependency position as the old load. Returns the
+  /// new merged load chain.
+  SDValue makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
+
+  /// Topological-sort the AllNodes list and assign a unique node id to each
+  /// node in the DAG based on its topological order. Returns the number of
+  /// nodes.
+  unsigned AssignTopologicalOrder();
+
+  /// Move node N in the AllNodes list to be immediately
+  /// before the given iterator Position. This may be used to update the
+  /// topological ordering when the list of nodes is modified.
+  void RepositionNode(allnodes_iterator Position, SDNode *N) {
+    AllNodes.insert(Position, AllNodes.remove(N));
+  }
+
+  /// Returns an APFloat semantics tag appropriate for the given type. If VT is
+  /// a vector type, the element semantics are returned.
+  static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
+    switch (VT.getScalarType().getSimpleVT().SimpleTy) {
+    default: llvm_unreachable("Unknown FP format");
+    case MVT::f16:     return APFloat::IEEEhalf();
+    case MVT::f32:     return APFloat::IEEEsingle();
+    case MVT::f64:     return APFloat::IEEEdouble();
+    case MVT::f80:     return APFloat::x87DoubleExtended();
+    case MVT::f128:    return APFloat::IEEEquad();
+    case MVT::ppcf128: return APFloat::PPCDoubleDouble();
+    }
+  }
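+
+  // A minimal sketch (the helper is static, so no DAG instance is needed):
+  //   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(MVT::f32);
+  //   APFloat One(Sem, "1.0"); // 1.0 in the matching float semantics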
+
+  /// Add a dbg_value SDNode. If SD is non-null that means the
+  /// value is produced by SD.
+  void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
+
+  /// Get the debug values which reference the given SDNode.
+  ArrayRef<SDDbgValue*> GetDbgValues(const SDNode *SD) {
+    return DbgInfo->getSDDbgValues(SD);
+  }
+
+public:
+  /// Return true if there are any SDDbgValue nodes associated
+  /// with this SelectionDAG.
+  bool hasDebugValues() const { return !DbgInfo->empty(); }
+
+  SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
+  SDDbgInfo::DbgIterator DbgEnd()   { return DbgInfo->DbgEnd(); }
+
+  SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
+    return DbgInfo->ByvalParmDbgBegin();
+  }
+
+  SDDbgInfo::DbgIterator ByvalParmDbgEnd()   {
+    return DbgInfo->ByvalParmDbgEnd();
+  }
+
+  /// To be invoked on an SDNode that is slated to be erased. This
+  /// function mirrors \c llvm::salvageDebugInfo.
+  void salvageDebugInfo(SDNode &N);
+
+  void dump() const;
+
+  /// Create a stack temporary, suitable for holding the specified value type.
+  /// If minAlign is specified, the slot size will have at least that alignment.
+  SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
+
+  /// Create a stack temporary suitable for holding either of the specified
+  /// value types.
+  SDValue CreateStackTemporary(EVT VT1, EVT VT2);
+
+  SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
+                           const GlobalAddressSDNode *GA,
+                           const SDNode *N2);
+
+  SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                 SDNode *Cst1, SDNode *Cst2);
+
+  SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                 const ConstantSDNode *Cst1,
+                                 const ConstantSDNode *Cst2);
+
+  SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
+                                       ArrayRef<SDValue> Ops,
+                                       const SDNodeFlags Flags = SDNodeFlags());
+
+  /// Constant fold a setcc to true or false.
+  SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
+                    const SDLoc &dl);
+
+  /// See if the specified operand can be simplified with the knowledge that only
+  /// the bits specified by Mask are used.  If so, return the simpler operand,
+  /// otherwise return a null SDValue.
+  ///
+  /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
+  /// simplify nodes with multiple uses more aggressively.)
+  SDValue GetDemandedBits(SDValue V, const APInt &Mask);
+
+  /// Return true if the sign bit of Op is known to be zero.
+  /// We use this predicate to simplify operations downstream.
+  bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
+
+  /// Return true if 'Op & Mask' is known to be zero.  We
+  /// use this predicate to simplify operations downstream.  Op and Mask are
+  /// known to be the same type.
+  bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
+    const;
+
+  /// Determine which bits of Op are known to be either zero or one and return
+  /// them in Known. For vectors, the known bits are those that are shared by
+  /// every vector element.
+  /// Targets can implement the computeKnownBitsForTargetNode method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  void computeKnownBits(SDValue Op, KnownBits &Known, unsigned Depth = 0) const;
+
+  /// Determine which bits of Op are known to be either zero or one and return
+  /// them in Known. The DemandedElts argument allows us to only collect the
+  /// known bits that are shared by the requested vector elements.
+  /// Targets can implement the computeKnownBitsForTargetNode method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+                        unsigned Depth = 0) const;
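+
+  // A minimal sketch (assumes a SelectionDAG &DAG and an SDValue Op in scope):
+  // prove Op is even by showing that its low bit is known to be zero.
+  //   KnownBits Known;
+  //   DAG.computeKnownBits(Op, Known);
+  //   bool IsEven = Known.Zero[0];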
+
+  /// Used to represent the possible overflow behavior of an operation.
+  /// Never: the operation cannot overflow.
+  /// Sometime: the operation may or may not overflow.
+  /// Always: the operation will always overflow.
+  enum OverflowKind {
+    OFK_Never,
+    OFK_Sometime,
+    OFK_Always,
+  };
+
+  /// Determine if the result of the addition of two nodes can overflow.
+  OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
+
+  /// Test if the given value is known to have exactly one bit set. This differs
+  /// from computeKnownBits in that it doesn't necessarily determine which bit
+  /// is set.
+  bool isKnownToBeAPowerOfTwo(SDValue Val) const;
+
+  /// Return the number of times the sign bit of the register is replicated into
+  /// the other bits. We know that at least 1 bit is always equal to the sign
+  /// bit (itself), but other cases can give us information. For example,
+  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
+  /// to each other, so we return 3. Targets can implement the
+  /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
+  /// target nodes to be understood.
+  unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
+
+  /// Return the number of times the sign bit of the register is replicated into
+  /// the other bits. We know that at least 1 bit is always equal to the sign
+  /// bit (itself), but other cases can give us information. For example,
+  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
+  /// to each other, so we return 3. The DemandedElts argument allows
+  /// us to only collect the minimum sign bits of the requested vector elements.
+  /// Targets can implement the ComputeNumSignBitsForTarget method in the
+  /// TargetLowering class to allow target nodes to be understood.
+  unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
+                              unsigned Depth = 0) const;
+
+  /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
+  /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
+  /// is guaranteed to have the same semantics as an ADD. This handles the
+  /// equivalence:
+  ///     X|Cst == X+Cst iff X&Cst == 0.
+  bool isBaseWithConstantOffset(SDValue Op) const;
+
+  /// Test whether the given SDValue is known to never be NaN.
+  bool isKnownNeverNaN(SDValue Op) const;
+
+  /// Test whether the given SDValue is known to never be positive or negative
+  /// zero.
+  bool isKnownNeverZero(SDValue Op) const;
+
+  /// Test whether two SDValues are known to compare equal. This
+  /// is true if they are the same value, or if one is negative zero and the
+  /// other positive zero.
+  bool isEqualTo(SDValue A, SDValue B) const;
+
+  /// Return true if A and B have no common bits set. As an example, this can
+  /// allow an 'add' to be transformed into an 'or'.
+  bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
+
+  /// Utility function used by legalize and lowering to
+  /// "unroll" a vector operation by splitting out the scalars and operating
+  /// on each element individually. If ResNE is 0, fully unroll the vector
+  /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
+  /// If ResNE is greater than the width of the vector op, unroll the
+  /// vector op and fill the end of the resulting vector with UNDEFs.
+  SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
+
+  /// Return true if the loads are next to each other and can be merged:
+  /// check that both are nonvolatile and that LD loads 'Bytes' bytes from a
+  /// location that is 'Dist' units away from the location the 'Base' load is
+  /// loading from.
+  bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
+                                      unsigned Bytes, int Dist) const;
+
+  /// Infer alignment of a load / store address. Return 0 if
+  /// it cannot be inferred.
+  unsigned InferPtrAlignment(SDValue Ptr) const;
+
+  /// Compute the VTs needed for the low/hi parts of a type
+  /// which is split (or expanded) into two not necessarily identical pieces.
+  std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
+
+  /// Split the vector with EXTRACT_SUBVECTOR using the provided
+  /// VTs and return the low/high part.
+  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
+                                          const EVT &LoVT, const EVT &HiVT);
+
+  /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
+  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
+    EVT LoVT, HiVT;
+    std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
+    return SplitVector(N, DL, LoVT, HiVT);
+  }
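+
+  // A minimal sketch (assumes a SelectionDAG &DAG, an SDLoc DL, and a vector
+  // SDValue Vec in scope):
+  //   SDValue Lo, Hi;
+  //   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);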
+
+  /// Split the node's operand with EXTRACT_SUBVECTOR and
+  /// return the low/high part.
+  std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
+  {
+    return SplitVector(N->getOperand(OpNo), SDLoc(N));
+  }
+
+  /// Append the extracted elements from Start to Count out of the vector Op
+  /// in Args. If Count is 0, all of the elements will be extracted.
+  void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
+                             unsigned Start = 0, unsigned Count = 0);
+
+  /// Compute the default alignment value for the given type.
+  unsigned getEVTAlignment(EVT MemoryVT) const;
+
+  /// Test whether the given value is a constant int or similar node.
+  SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N);
+
+  /// Test whether the given value is a constant FP or similar node.
+  SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N);
+
+  /// \returns true if \p N is any kind of constant or build_vector of
+  /// constants, int or float. If a vector, it may not necessarily be a splat.
+  inline bool isConstantValueOfAnyType(SDValue N) {
+    return isConstantIntBuildVectorOrConstantInt(N) ||
+           isConstantFPBuildVectorOrConstantFP(N);
+  }
+
+private:
+  void InsertNode(SDNode *N);
+  bool RemoveNodeFromCSEMaps(SDNode *N);
+  void AddModifiedNodeToCSEMaps(SDNode *N);
+  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
+  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
+                               void *&InsertPos);
+  SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
+                               void *&InsertPos);
+  SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
+
+  void DeleteNodeNotInCSEMaps(SDNode *N);
+  void DeallocateNode(SDNode *N);
+
+  void allnodes_clear();
+
+  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
+  /// not, return the insertion token that will make insertion faster.  This
+  /// overload is for nodes other than Constant or ConstantFP, use the other one
+  /// for those.
+  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
+  /// not, return the insertion token that will make insertion faster.  Performs
+  /// additional processing for constant nodes.
+  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
+                              void *&InsertPos);
+
+  /// List of non-single value types.
+  FoldingSet<SDVTListNode> VTListMap;
+
+  /// Maps to auto-CSE operations.
+  std::vector<CondCodeSDNode*> CondCodeNodes;
+
+  std::vector<SDNode*> ValueTypeNodes;
+  std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
+  StringMap<SDNode*> ExternalSymbols;
+
+  std::map<std::pair<std::string, unsigned char>,SDNode*> TargetExternalSymbols;
+  DenseMap<MCSymbol *, SDNode *> MCSymbols;
+};
+
+template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
+  using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
+
+  static nodes_iterator nodes_begin(SelectionDAG *G) {
+    return nodes_iterator(G->allnodes_begin());
+  }
+
+  static nodes_iterator nodes_end(SelectionDAG *G) {
+    return nodes_iterator(G->allnodes_end());
+  }
+};
+
+template <class TargetMemSDNode>
+SDValue SelectionDAG::getTargetMemSDNode(SDVTList VTs,
+                                         ArrayRef<SDValue> Ops,
+                                         const SDLoc &dl, EVT MemVT,
+                                         MachineMemOperand *MMO) {
+  // Compose the node ID and try to find an existing node.
+  FoldingSetNodeID ID;
+  unsigned Opcode =
+    TargetMemSDNode(dl.getIROrder(), DebugLoc(), VTs, MemVT, MMO).getOpcode();
+  ID.AddInteger(Opcode);
+  ID.AddPointer(VTs.VTs);
+  for (auto& Op : Ops) {
+    ID.AddPointer(Op.getNode());
+    ID.AddInteger(Op.getResNo());
+  }
+  ID.AddInteger(MemVT.getRawBits());
+  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
+  ID.AddInteger(getSyntheticNodeSubclassData<TargetMemSDNode>(
+    dl.getIROrder(), VTs, MemVT, MMO));
+
+  void *IP = nullptr;
+  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
+    cast<TargetMemSDNode>(E)->refineAlignment(MMO);
+    return SDValue(E, 0);
+  }
+
+  // Existing node was not found. Create a new one.
+  auto *N = newSDNode<TargetMemSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
+                                       MemVT, MMO);
+  createOperands(N, Ops);
+  CSEMap.InsertNode(N, IP);
+  InsertNode(N);
+  return SDValue(N, 0);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
new file mode 100644
index 0000000..5806064
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGAddressAnalysis.h
@@ -0,0 +1,64 @@
+//===- SelectionDAGAddressAnalysis.h - DAG Address Analysis -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
+#define LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
+
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include <cstdint>
+
+namespace llvm {
+
+class SelectionDAG;
+
+/// Helper struct to parse and store a memory address as base + index + offset.
+/// We ignore sign extensions when it is safe to do so.
+/// The following two expressions are not equivalent. To differentiate we need
+/// to store whether there was a sign extension involved in the index
+/// computation.
+///  (load (i64 add (i64 copyfromreg %c)
+///                 (i64 signextend (add (i8 load %index)
+///                                      (i8 1)))))
+/// vs
+///
+/// (load (i64 add (i64 copyfromreg %c)
+///                (i64 signextend (i32 add (i32 signextend (i8 load %index))
+///                                         (i32 1)))))
+class BaseIndexOffset {
+private:
+  SDValue Base;
+  SDValue Index;
+  int64_t Offset = 0;
+  bool IsIndexSignExt = false;
+
+public:
+  BaseIndexOffset() = default;
+  BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
+                  bool IsIndexSignExt)
+      : Base(Base), Index(Index), Offset(Offset),
+        IsIndexSignExt(IsIndexSignExt) {}
+
+  SDValue getBase() { return Base; }
+  SDValue getIndex() { return Index; }
+
+  bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG) {
+    int64_t Off;
+    return equalBaseIndex(Other, DAG, Off);
+  }
+
+  bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG,
+                      int64_t &Off);
+
+  /// Parses the address of \p N into base, index, and offset components.
+  static BaseIndexOffset match(LSBaseSDNode *N, const SelectionDAG &DAG);
+};
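+
+// A minimal sketch of comparing two memory addresses (assumes a SelectionDAG
+// &DAG and two LSBaseSDNode pointers LD0 and LD1 in scope; names are
+// illustrative):
+//   BaseIndexOffset B0 = BaseIndexOffset::match(LD0, DAG);
+//   BaseIndexOffset B1 = BaseIndexOffset::match(LD1, DAG);
+//   int64_t Off;
+//   if (B0.equalBaseIndex(B1, DAG, Off)) {
+//     // Same base and index; the accesses are Off bytes apart.
+//   }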
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
new file mode 100644
index 0000000..e56eafc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGISel.h
@@ -0,0 +1,348 @@
+//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SelectionDAGISel class, which is used as the common
+// base class for SelectionDAG-based instruction selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
+#define LLVM_CODEGEN_SELECTIONDAGISEL_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+  class FastISel;
+  class SelectionDAGBuilder;
+  class SDValue;
+  class MachineRegisterInfo;
+  class MachineBasicBlock;
+  class MachineFunction;
+  class MachineInstr;
+  class OptimizationRemarkEmitter;
+  class TargetLowering;
+  class TargetLibraryInfo;
+  class FunctionLoweringInfo;
+  class ScheduleHazardRecognizer;
+  class GCFunctionInfo;
+  class ScheduleDAGSDNodes;
+  class LoadInst;
+
+/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
+/// pattern-matching instruction selectors.
+class SelectionDAGISel : public MachineFunctionPass {
+public:
+  TargetMachine &TM;
+  const TargetLibraryInfo *LibInfo;
+  FunctionLoweringInfo *FuncInfo;
+  MachineFunction *MF;
+  MachineRegisterInfo *RegInfo;
+  SelectionDAG *CurDAG;
+  SelectionDAGBuilder *SDB;
+  AliasAnalysis *AA;
+  GCFunctionInfo *GFI;
+  CodeGenOpt::Level OptLevel;
+  const TargetInstrInfo *TII;
+  const TargetLowering *TLI;
+  bool FastISelFailed;
+  SmallPtrSet<const Instruction *, 4> ElidedArgCopyInstrs;
+
+  /// Current optimization remark emitter.
+  /// Used to report things like combines and FastISel failures.
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+  static char ID;
+
+  explicit SelectionDAGISel(TargetMachine &tm,
+                            CodeGenOpt::Level OL = CodeGenOpt::Default);
+  ~SelectionDAGISel() override;
+
+  const TargetLowering *getTargetLowering() const { return TLI; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  virtual void EmitFunctionEntryCode() {}
+
+  /// PreprocessISelDAG - This hook allows targets to hack on the graph before
+  /// instruction selection starts.
+  virtual void PreprocessISelDAG() {}
+
+  /// PostprocessISelDAG() - This hook allows the target to hack on the graph
+  /// right after selection.
+  virtual void PostprocessISelDAG() {}
+
+  /// Main hook for targets to transform nodes into machine nodes.
+  virtual void Select(SDNode *N) = 0;
+
+  /// SelectInlineAsmMemoryOperand - Select the specified address as a target
+  /// addressing mode, according to the specified constraint.  If this does
+  /// not match or is not implemented, return true.  The resultant operands
+  /// (which will appear in the machine instruction) should be added to the
+  /// OutOps vector.
+  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+                                            unsigned ConstraintID,
+                                            std::vector<SDValue> &OutOps) {
+    return true;
+  }
+
+  /// IsProfitableToFold - Returns true if it's profitable to fold the specific
+  /// operand node N of U during instruction selection that starts at Root.
+  virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
+
+  /// IsLegalToFold - Returns true if the specific operand node N of
+  /// U can be folded during instruction selection that starts at Root.
+  /// FIXME: This is a static member function because the MSP430 and X86
+  /// targets use it during isel. This could become a proper member.
+  static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
+                            CodeGenOpt::Level OptLevel,
+                            bool IgnoreChains = false);
+
+  static void InvalidateNodeId(SDNode *N);
+  static int getUninvalidatedNodeId(SDNode *N);
+
+  static void EnforceNodeIdInvariant(SDNode *N);
+
+  // Opcodes used by the DAG state machine:
+  enum BuiltinOpcodes {
+    OPC_Scope,
+    OPC_RecordNode,
+    OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
+    OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
+    OPC_RecordMemRef,
+    OPC_CaptureGlueInput,
+    OPC_MoveChild,
+    OPC_MoveChild0, OPC_MoveChild1, OPC_MoveChild2, OPC_MoveChild3,
+    OPC_MoveChild4, OPC_MoveChild5, OPC_MoveChild6, OPC_MoveChild7,
+    OPC_MoveParent,
+    OPC_CheckSame,
+    OPC_CheckChild0Same, OPC_CheckChild1Same,
+    OPC_CheckChild2Same, OPC_CheckChild3Same,
+    OPC_CheckPatternPredicate,
+    OPC_CheckPredicate,
+    OPC_CheckOpcode,
+    OPC_SwitchOpcode,
+    OPC_CheckType,
+    OPC_CheckTypeRes,
+    OPC_SwitchType,
+    OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
+    OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
+    OPC_CheckChild6Type, OPC_CheckChild7Type,
+    OPC_CheckInteger,
+    OPC_CheckChild0Integer, OPC_CheckChild1Integer, OPC_CheckChild2Integer,
+    OPC_CheckChild3Integer, OPC_CheckChild4Integer,
+    OPC_CheckCondCode,
+    OPC_CheckValueType,
+    OPC_CheckComplexPat,
+    OPC_CheckAndImm, OPC_CheckOrImm,
+    OPC_CheckFoldableChainNode,
+
+    OPC_EmitInteger,
+    OPC_EmitRegister,
+    OPC_EmitRegister2,
+    OPC_EmitConvertToTarget,
+    OPC_EmitMergeInputChains,
+    OPC_EmitMergeInputChains1_0,
+    OPC_EmitMergeInputChains1_1,
+    OPC_EmitMergeInputChains1_2,
+    OPC_EmitCopyToReg,
+    OPC_EmitNodeXForm,
+    OPC_EmitNode,
+    // Space-optimized forms that implicitly encode number of result VTs.
+    OPC_EmitNode0, OPC_EmitNode1, OPC_EmitNode2,
+    OPC_MorphNodeTo,
+    // Space-optimized forms that implicitly encode number of result VTs.
+    OPC_MorphNodeTo0, OPC_MorphNodeTo1, OPC_MorphNodeTo2,
+    OPC_CompleteMatch,
+    // Contains offset in table for pattern being selected
+    OPC_Coverage
+  };
+
+  enum {
+    OPFL_None       = 0,  // Node has no chain or glue input and isn't variadic.
+    OPFL_Chain      = 1,     // Node has a chain input.
+    OPFL_GlueInput  = 2,     // Node has a glue input.
+    OPFL_GlueOutput = 4,     // Node has a glue output.
+    OPFL_MemRefs    = 8,     // Node gets accumulated MemRefs.
+    OPFL_Variadic0  = 1<<4,  // Node is variadic, root has 0 fixed inputs.
+    OPFL_Variadic1  = 2<<4,  // Node is variadic, root has 1 fixed input.
+    OPFL_Variadic2  = 3<<4,  // Node is variadic, root has 2 fixed inputs.
+    OPFL_Variadic3  = 4<<4,  // Node is variadic, root has 3 fixed inputs.
+    OPFL_Variadic4  = 5<<4,  // Node is variadic, root has 4 fixed inputs.
+    OPFL_Variadic5  = 6<<4,  // Node is variadic, root has 5 fixed inputs.
+    OPFL_Variadic6  = 7<<4,  // Node is variadic, root has 6 fixed inputs.
+
+    OPFL_VariadicInfo = OPFL_Variadic6
+  };
+
+  /// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
+  /// number of fixed arity values that should be skipped when copying from the
+  /// root. For example, OPFL_Variadic2 stores 3 in the variadic field, so this
+  /// returns 3 - 1 = 2.
+  static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
+    return ((Flags&OPFL_VariadicInfo) >> 4)-1;
+  }
+
+protected:
+  /// DAGSize - Size of DAG being instruction selected.
+  ///
+  unsigned DAGSize;
+
+  /// ReplaceUses - replace all uses of the old node F with the use
+  /// of the new node T.
+  void ReplaceUses(SDValue F, SDValue T) {
+    CurDAG->ReplaceAllUsesOfValueWith(F, T);
+    EnforceNodeIdInvariant(T.getNode());
+  }
+
+  /// ReplaceUses - replace all uses of the old nodes F with the use
+  /// of the new nodes T.
+  void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
+    CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
+    for (unsigned i = 0; i < Num; ++i)
+      EnforceNodeIdInvariant(T[i].getNode());
+  }
+
+  /// ReplaceUses - replace all uses of the old node F with the use
+  /// of the new node T.
+  void ReplaceUses(SDNode *F, SDNode *T) {
+    CurDAG->ReplaceAllUsesWith(F, T);
+    EnforceNodeIdInvariant(T);
+  }
+
+  /// Replace all uses of \c F with \c T, then remove \c F from the DAG.
+  void ReplaceNode(SDNode *F, SDNode *T) {
+    CurDAG->ReplaceAllUsesWith(F, T);
+    EnforceNodeIdInvariant(T);
+    CurDAG->RemoveDeadNode(F);
+  }
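+
+  // A minimal sketch of the usual flow in a target's Select() (assumes a
+  // matched SDNode *N in scope; TargetOpc is an illustrative machine opcode):
+  //   MachineSDNode *New = CurDAG->getMachineNode(TargetOpc, SDLoc(N),
+  //                                               N->getValueType(0),
+  //                                               N->getOperand(0));
+  //   ReplaceNode(N, New);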
+
+  /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
+  /// by tblgen.  Others should not call it.
+  void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
+                                     const SDLoc &DL);
+
+  /// getPatternForIndex - Return the pattern selected by tablegen for the
+  /// given index during ISel.
+  virtual StringRef getPatternForIndex(unsigned index) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  /// getIncludePathForIndex - Return the .td source location at which the
+  /// pattern with the given index was instantiated.
+  virtual StringRef getIncludePathForIndex(unsigned index) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+public:
+  // Calls to these predicates are generated by tblgen.
+  bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
+                    int64_t DesiredMaskS) const;
+  bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
+                    int64_t DesiredMaskS) const;
+
+
+  /// CheckPatternPredicate - This function is generated by tblgen in the
+  /// target.  It runs the specified pattern predicate and returns true if it
+  /// succeeds or false if it fails.  The number is a private implementation
+  /// detail of the code tblgen produces.
+  virtual bool CheckPatternPredicate(unsigned PredNo) const {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  /// CheckNodePredicate - This function is generated by tblgen in the target.
+  /// It runs node predicate number PredNo and returns true if it succeeds or
+  /// false if it fails.  The number is a private implementation
+  /// detail of the code tblgen produces.
+  virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
+                                   unsigned PatternNo,
+                        SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
+    llvm_unreachable("Tblgen should generate the implementation of this!");
+  }
+
+  virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
+    llvm_unreachable("Tblgen should generate this!");
+  }
+
+  void SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
+                        unsigned TableSize);
+
+  /// \brief Return true if complex patterns for this target can mutate the
+  /// DAG.
+  virtual bool ComplexPatternFuncMutatesDAG() const {
+    return false;
+  }
+
+  bool isOrEquivalentToAdd(const SDNode *N) const;
+
+private:
+
+  // Calls to these functions are generated by tblgen.
+  void Select_INLINEASM(SDNode *N);
+  void Select_READ_REGISTER(SDNode *N);
+  void Select_WRITE_REGISTER(SDNode *N);
+  void Select_UNDEF(SDNode *N);
+  void CannotYetSelect(SDNode *N);
+
+private:
+  void DoInstructionSelection();
+  SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
+                    ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
+
+  SDNode *MutateStrictFPToFP(SDNode *Node, unsigned NewOpc);
+
+  /// Prepares the landing pad to take incoming values or do other EH
+  /// personality specific tasks. Returns true if the block should be
+  /// instruction selected, false if no code should be emitted for it.
+  bool PrepareEHLandingPad();
+
+  /// \brief Perform instruction selection on all basic blocks in the function.
+  void SelectAllBasicBlocks(const Function &Fn);
+
+  /// \brief Perform instruction selection on a single basic block, for
+  /// instructions between \p Begin and \p End.  \p HadTailCall will be set
+  /// to true if a call in the block was translated as a tail call.
+  void SelectBasicBlock(BasicBlock::const_iterator Begin,
+                        BasicBlock::const_iterator End,
+                        bool &HadTailCall);
+  void FinishBasicBlock();
+
+  void CodeGenAndEmitDAG();
+
+  /// \brief Generate instructions for lowering the incoming arguments of the
+  /// given function.
+  void LowerArguments(const Function &F);
+
+  void ComputeLiveOutVRegInfo();
+
+  /// Create the scheduler. If a specific scheduler was specified
+  /// via the SchedulerRegistry, use it, otherwise select the
+  /// one preferred by the target.
+  ///
+  ScheduleDAGSDNodes *CreateScheduler();
+
+  /// OpcodeOffset - This is a cache used to dispatch efficiently into isel
+  /// state machines that start with an OPC_SwitchOpcode node.
+  std::vector<unsigned> OpcodeOffset;
+
+  void UpdateChains(SDNode *NodeToMatch, SDValue InputChain,
+                    SmallVectorImpl<SDNode *> &ChainNodesMatched,
+                    bool isMorphNodeTo);
+};
+
+}
+
+#endif /* LLVM_CODEGEN_SELECTIONDAGISEL_H */
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
new file mode 100644
index 0000000..ffb5c00
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -0,0 +1,2399 @@
+//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SDNode class and derived classes, which are used to
+// represent the nodes and operations present in a SelectionDAG.  These nodes
+// and operations are machine code level operations, with some similarities to
+// the GCC RTL representation.
+//
+// Clients should include the SelectionDAG.h file instead of this file directly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
+#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <tuple>
+
+namespace llvm {
+
+class APInt;
+class Constant;
+template <typename T> struct DenseMapInfo;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class MCSymbol;
+class raw_ostream;
+class SDNode;
+class SelectionDAG;
+class Type;
+class Value;
+
+void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
+                    bool force = false);
+
+/// This represents a list of ValueType's that has been intern'd by
+/// a SelectionDAG.  Instances of this simple value class are returned by
+/// SelectionDAG::getVTList(...).
+///
+struct SDVTList {
+  const EVT *VTs;
+  unsigned int NumVTs;
+};
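+
+// Example (illustrative): SDVTList objects are obtained from the DAG rather
+// than built by hand, e.g. for a node producing an i32 result plus a chain
+// ('DAG' is assumed to be a SelectionDAG in scope):
+//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);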
+
+namespace ISD {
+
+  /// Node predicates
+
+  /// If N is a BUILD_VECTOR node whose elements are all the same constant or
+  /// undefined, return true and return the constant value in \p SplatValue.
+  bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
+
+  /// Return true if the specified node is a BUILD_VECTOR where all of the
+  /// elements are ~0 or undef.
+  bool isBuildVectorAllOnes(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR where all of the
+  /// elements are 0 or undef.
+  bool isBuildVectorAllZeros(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR node of all
+  /// ConstantSDNode or undef.
+  bool isBuildVectorOfConstantSDNodes(const SDNode *N);
+
+  /// Return true if the specified node is a BUILD_VECTOR node of all
+  /// ConstantFPSDNode or undef.
+  bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
+
+  /// Return true if the node has at least one operand and all operands of the
+  /// specified node are ISD::UNDEF.
+  bool allOperandsUndef(const SDNode *N);
+
+} // end namespace ISD
+
+//===----------------------------------------------------------------------===//
+/// Unlike LLVM values, Selection DAG nodes may return multiple
+/// values as the result of a computation.  Many nodes return multiple values,
+/// from loads (which define a token and a return value) to ADDC (which returns
+/// a result and a carry value), to calls (which may return an arbitrary number
+/// of values).
+///
+/// As such, each use of a SelectionDAG computation must indicate the node that
+/// computes it as well as which return value to use from that node.  This pair
+/// of information is represented with the SDValue value type.
+///
+class SDValue {
+  friend struct DenseMapInfo<SDValue>;
+
+  SDNode *Node = nullptr; // The node defining the value we are using.
+  unsigned ResNo = 0;     // Which return value of the node we are using.
+
+public:
+  SDValue() = default;
+  SDValue(SDNode *node, unsigned resno);
+
+  /// get the index which selects a specific result in the SDNode
+  unsigned getResNo() const { return ResNo; }
+
+  /// get the SDNode which holds the desired result
+  SDNode *getNode() const { return Node; }
+
+  /// set the SDNode
+  void setNode(SDNode *N) { Node = N; }
+
+  inline SDNode *operator->() const { return Node; }
+
+  bool operator==(const SDValue &O) const {
+    return Node == O.Node && ResNo == O.ResNo;
+  }
+  bool operator!=(const SDValue &O) const {
+    return !operator==(O);
+  }
+  bool operator<(const SDValue &O) const {
+    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
+  }
+  explicit operator bool() const {
+    return Node != nullptr;
+  }
+
+  SDValue getValue(unsigned R) const {
+    return SDValue(Node, R);
+  }
+
+  /// Return true if this node is an operand of N.
+  bool isOperandOf(const SDNode *N) const;
+
+  /// Return the ValueType of the referenced return value.
+  inline EVT getValueType() const;
+
+  /// Return the simple ValueType of the referenced return value.
+  MVT getSimpleValueType() const {
+    return getValueType().getSimpleVT();
+  }
+
+  /// Returns the size of the value in bits.
+  unsigned getValueSizeInBits() const {
+    return getValueType().getSizeInBits();
+  }
+
+  unsigned getScalarValueSizeInBits() const {
+    return getValueType().getScalarType().getSizeInBits();
+  }
+
+  // Forwarding methods - These forward to the corresponding methods in SDNode.
+  inline unsigned getOpcode() const;
+  inline unsigned getNumOperands() const;
+  inline const SDValue &getOperand(unsigned i) const;
+  inline uint64_t getConstantOperandVal(unsigned i) const;
+  inline bool isTargetMemoryOpcode() const;
+  inline bool isTargetOpcode() const;
+  inline bool isMachineOpcode() const;
+  inline bool isUndef() const;
+  inline unsigned getMachineOpcode() const;
+  inline const DebugLoc &getDebugLoc() const;
+  inline void dump() const;
+  inline void dump(const SelectionDAG *G) const;
+  inline void dumpr() const;
+  inline void dumpr(const SelectionDAG *G) const;
+
+  /// Return true if this operand (which must be a chain) reaches the
+  /// specified operand without crossing any side-effecting instructions.
+  /// In practice, this looks through token factors and non-volatile loads.
+  /// In order to remain efficient, this only
+  /// looks a couple of nodes in; it does not do an exhaustive search.
+  bool reachesChainWithoutSideEffects(SDValue Dest,
+                                      unsigned Depth = 2) const;
+
+  /// Return true if there are no nodes using value ResNo of Node.
+  inline bool use_empty() const;
+
+  /// Return true if there is exactly one node using value ResNo of Node.
+  inline bool hasOneUse() const;
+};
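+
+// Example (illustrative): an SDValue names one result of a node, so the two
+// results of a load node (the loaded value and the output chain) would be
+// addressed as follows ('LoadNode' is assumed to be in scope):
+//   SDValue Data(LoadNode, 0);        // result #0: the loaded value
+//   SDValue Chain = Data.getValue(1); // same node, result #1: the chain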
+
+template<> struct DenseMapInfo<SDValue> {
+  static inline SDValue getEmptyKey() {
+    SDValue V;
+    V.ResNo = -1U;
+    return V;
+  }
+
+  static inline SDValue getTombstoneKey() {
+    SDValue V;
+    V.ResNo = -2U;
+    return V;
+  }
+
+  static unsigned getHashValue(const SDValue &Val) {
+    return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
+            (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
+  }
+
+  static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
+    return LHS == RHS;
+  }
+};
+template <> struct isPodLike<SDValue> { static const bool value = true; };
+
+/// Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDValue> {
+  using SimpleType = SDNode *;
+
+  static SimpleType getSimplifiedValue(SDValue &Val) {
+    return Val.getNode();
+  }
+};
+template<> struct simplify_type<const SDValue> {
+  using SimpleType = /*const*/ SDNode *;
+
+  static SimpleType getSimplifiedValue(const SDValue &Val) {
+    return Val.getNode();
+  }
+};
+
+/// Represents a use of a SDNode. This class holds an SDValue,
+/// which records the SDNode being used and the result number, a
+/// pointer to the SDNode using the value, and Next and Prev pointers,
+/// which link together all the uses of an SDNode.
+///
+class SDUse {
+  /// Val - The value being used.
+  SDValue Val;
+  /// User - The user of this value.
+  SDNode *User = nullptr;
+  /// Prev, Next - Pointers to the uses list of the SDNode referred by
+  /// this operand.
+  SDUse **Prev = nullptr;
+  SDUse *Next = nullptr;
+
+public:
+  SDUse() = default;
+  SDUse(const SDUse &U) = delete;
+  SDUse &operator=(const SDUse &) = delete;
+
+  /// Normally SDUse will just implicitly convert to the SDValue that it holds.
+  operator const SDValue&() const { return Val; }
+
+  /// If implicit conversion to SDValue doesn't work, the get() method returns
+  /// the SDValue.
+  const SDValue &get() const { return Val; }
+
+  /// This returns the SDNode that contains this Use.
+  SDNode *getUser() { return User; }
+
+  /// Get the next SDUse in the use list.
+  SDUse *getNext() const { return Next; }
+
+  /// Convenience function for get().getNode().
+  SDNode *getNode() const { return Val.getNode(); }
+  /// Convenience function for get().getResNo().
+  unsigned getResNo() const { return Val.getResNo(); }
+  /// Convenience function for get().getValueType().
+  EVT getValueType() const { return Val.getValueType(); }
+
+  /// Convenience function for get().operator==
+  bool operator==(const SDValue &V) const {
+    return Val == V;
+  }
+
+  /// Convenience function for get().operator!=
+  bool operator!=(const SDValue &V) const {
+    return Val != V;
+  }
+
+  /// Convenience function for get().operator<
+  bool operator<(const SDValue &V) const {
+    return Val < V;
+  }
+
+private:
+  friend class SelectionDAG;
+  friend class SDNode;
+  // TODO: unfriend HandleSDNode once we fix its operand handling.
+  friend class HandleSDNode;
+
+  void setUser(SDNode *p) { User = p; }
+
+  /// Remove this use from its existing use list, assign it the
+  /// given value, and add it to the new value's node's use list.
+  inline void set(const SDValue &V);
+  /// Like set, but only supports initializing a newly-allocated
+  /// SDUse with a non-null value.
+  inline void setInitial(const SDValue &V);
+  /// Like set, but only sets the Node portion of the value,
+  /// leaving the ResNo portion unmodified.
+  inline void setNode(SDNode *N);
+
+  void addToList(SDUse **List) {
+    Next = *List;
+    if (Next) Next->Prev = &Next;
+    Prev = List;
+    *List = this;
+  }
+
+  void removeFromList() {
+    *Prev = Next;
+    if (Next) Next->Prev = Prev;
+  }
+};
+
+/// simplify_type specializations - Allow casting operators to work directly on
+/// SDUses as if they were SDNode*'s.
+template<> struct simplify_type<SDUse> {
+  using SimpleType = SDNode *;
+
+  static SimpleType getSimplifiedValue(SDUse &Val) {
+    return Val.getNode();
+  }
+};
+
+/// These are IR-level optimization flags that may be propagated to SDNodes.
+/// TODO: This data structure should be shared by the IR optimizer and
+/// the backend.
+struct SDNodeFlags {
+private:
+  // This bit is used to determine if the flags are in a defined state.
+  // Flag bits can only be masked out during intersection if the masking flags
+  // are defined.
+  bool AnyDefined : 1;
+
+  bool NoUnsignedWrap : 1;
+  bool NoSignedWrap : 1;
+  bool Exact : 1;
+  bool UnsafeAlgebra : 1;
+  bool NoNaNs : 1;
+  bool NoInfs : 1;
+  bool NoSignedZeros : 1;
+  bool AllowReciprocal : 1;
+  bool VectorReduction : 1;
+  bool AllowContract : 1;
+
+public:
+  /// Default constructor turns off all optimization flags.
+  SDNodeFlags()
+      : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
+        Exact(false), UnsafeAlgebra(false), NoNaNs(false), NoInfs(false),
+        NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
+        AllowContract(false) {}
+
+  /// Sets the state of the flags to the defined state.
+  void setDefined() { AnyDefined = true; }
+  /// Returns true if the flags are in a defined state.
+  bool isDefined() const { return AnyDefined; }
+
+  // These are mutators for each flag.
+  void setNoUnsignedWrap(bool b) {
+    setDefined();
+    NoUnsignedWrap = b;
+  }
+  void setNoSignedWrap(bool b) {
+    setDefined();
+    NoSignedWrap = b;
+  }
+  void setExact(bool b) {
+    setDefined();
+    Exact = b;
+  }
+  void setUnsafeAlgebra(bool b) {
+    setDefined();
+    UnsafeAlgebra = b;
+  }
+  void setNoNaNs(bool b) {
+    setDefined();
+    NoNaNs = b;
+  }
+  void setNoInfs(bool b) {
+    setDefined();
+    NoInfs = b;
+  }
+  void setNoSignedZeros(bool b) {
+    setDefined();
+    NoSignedZeros = b;
+  }
+  void setAllowReciprocal(bool b) {
+    setDefined();
+    AllowReciprocal = b;
+  }
+  void setVectorReduction(bool b) {
+    setDefined();
+    VectorReduction = b;
+  }
+  void setAllowContract(bool b) {
+    setDefined();
+    AllowContract = b;
+  }
+
+  // These are accessors for each flag.
+  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
+  bool hasNoSignedWrap() const { return NoSignedWrap; }
+  bool hasExact() const { return Exact; }
+  bool hasUnsafeAlgebra() const { return UnsafeAlgebra; }
+  bool hasNoNaNs() const { return NoNaNs; }
+  bool hasNoInfs() const { return NoInfs; }
+  bool hasNoSignedZeros() const { return NoSignedZeros; }
+  bool hasAllowReciprocal() const { return AllowReciprocal; }
+  bool hasVectorReduction() const { return VectorReduction; }
+  bool hasAllowContract() const { return AllowContract; }
+
+  /// Clear any flags in this flag set that aren't also set in Flags.
+  /// If the given Flags are undefined then don't do anything.
+  void intersectWith(const SDNodeFlags Flags) {
+    if (!Flags.isDefined())
+      return;
+    NoUnsignedWrap &= Flags.NoUnsignedWrap;
+    NoSignedWrap &= Flags.NoSignedWrap;
+    Exact &= Flags.Exact;
+    UnsafeAlgebra &= Flags.UnsafeAlgebra;
+    NoNaNs &= Flags.NoNaNs;
+    NoInfs &= Flags.NoInfs;
+    NoSignedZeros &= Flags.NoSignedZeros;
+    AllowReciprocal &= Flags.AllowReciprocal;
+    VectorReduction &= Flags.VectorReduction;
+    AllowContract &= Flags.AllowContract;
+  }
+};
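+
+// Example (illustrative, not from the original header): flags are typically
+// filled in before node creation and passed to the SelectionDAG::getNode
+// overloads that accept an SDNodeFlags argument ('DAG', 'DL', 'VT', 'LHS'
+// and 'RHS' are assumed to be in scope):
+//   SDNodeFlags Flags;
+//   Flags.setNoSignedWrap(true);
+//   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, RHS, Flags);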
+
+/// Represents one node in the SelectionDAG.
+///
+class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
+private:
+  /// The operation that this node performs.
+  int16_t NodeType;
+
+protected:
+  // We define a set of mini-helper classes to help us interpret the bits in our
+  // SubclassData.  These are designed to fit within a uint16_t so they pack
+  // with NodeType.
+
+  class SDNodeBitfields {
+    friend class SDNode;
+    friend class MemIntrinsicSDNode;
+    friend class MemSDNode;
+    friend class SelectionDAG;
+
+    uint16_t HasDebugValue : 1;
+    uint16_t IsMemIntrinsic : 1;
+    uint16_t IsDivergent : 1;
+  };
+  enum { NumSDNodeBits = 3 };
+
+  class ConstantSDNodeBitfields {
+    friend class ConstantSDNode;
+
+    uint16_t : NumSDNodeBits;
+
+    uint16_t IsOpaque : 1;
+  };
+
+  class MemSDNodeBitfields {
+    friend class MemSDNode;
+    friend class MemIntrinsicSDNode;
+    friend class AtomicSDNode;
+
+    uint16_t : NumSDNodeBits;
+
+    uint16_t IsVolatile : 1;
+    uint16_t IsNonTemporal : 1;
+    uint16_t IsDereferenceable : 1;
+    uint16_t IsInvariant : 1;
+  };
+  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
+
+  class LSBaseSDNodeBitfields {
+    friend class LSBaseSDNode;
+
+    uint16_t : NumMemSDNodeBits;
+
+    uint16_t AddressingMode : 3; // enum ISD::MemIndexedMode
+  };
+  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
+
+  class LoadSDNodeBitfields {
+    friend class LoadSDNode;
+    friend class MaskedLoadSDNode;
+
+    uint16_t : NumLSBaseSDNodeBits;
+
+    uint16_t ExtTy : 2; // enum ISD::LoadExtType
+    uint16_t IsExpanding : 1;
+  };
+
+  class StoreSDNodeBitfields {
+    friend class StoreSDNode;
+    friend class MaskedStoreSDNode;
+
+    uint16_t : NumLSBaseSDNodeBits;
+
+    uint16_t IsTruncating : 1;
+    uint16_t IsCompressing : 1;
+  };
+
+  union {
+    char RawSDNodeBits[sizeof(uint16_t)];
+    SDNodeBitfields SDNodeBits;
+    ConstantSDNodeBitfields ConstantSDNodeBits;
+    MemSDNodeBitfields MemSDNodeBits;
+    LSBaseSDNodeBitfields LSBaseSDNodeBits;
+    LoadSDNodeBitfields LoadSDNodeBits;
+    StoreSDNodeBitfields StoreSDNodeBits;
+  };
+
+  // RawSDNodeBits must cover the entirety of the union.  This means that all of
+  // the union's members must have size <= RawSDNodeBits.  We write the RHS as
+  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
+  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
+  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
+
+private:
+  friend class SelectionDAG;
+  // TODO: unfriend HandleSDNode once we fix its operand handling.
+  friend class HandleSDNode;
+
+  /// Unique id per SDNode in the DAG.
+  int NodeId = -1;
+
+  /// The values that are used by this operation.
+  SDUse *OperandList = nullptr;
+
+  /// The types of the values this node defines.  SDNode's may
+  /// define multiple values simultaneously.
+  const EVT *ValueList;
+
+  /// List of uses for this SDNode.
+  SDUse *UseList = nullptr;
+
+  /// The number of entries in the Operand/Value list.
+  unsigned short NumOperands = 0;
+  unsigned short NumValues;
+
+  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
+  // original LLVM instructions.
+  // This is used for turning off scheduling, because we'll forgo
+  // the normal scheduling algorithms and output the instructions according to
+  // this ordering.
+  unsigned IROrder;
+
+  /// Source line information.
+  DebugLoc debugLoc;
+
+  /// Return a pointer to the specified value type.
+  static const EVT *getValueTypeList(EVT VT);
+
+  SDNodeFlags Flags;
+
+public:
+  /// Unique and persistent id per SDNode in the DAG.
+  /// Used for debug printing.
+  uint16_t PersistentId;
+
+  //===--------------------------------------------------------------------===//
+  //  Accessors
+  //
+
+  /// Return the SelectionDAG opcode value for this node. For
+  /// pre-isel nodes (those for which isMachineOpcode returns false), these
+  /// are the opcode values in the ISD and <target>ISD namespaces. For
+  /// post-isel opcodes, see getMachineOpcode.
+  unsigned getOpcode()  const { return (unsigned short)NodeType; }
+
+  /// Test if this node has a target-specific opcode (in the
+  /// \<target\>ISD namespace).
+  bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
+
+  /// Test if this node has a target-specific
+  /// memory-referencing opcode (in the \<target\>ISD namespace and
+  /// greater than FIRST_TARGET_MEMORY_OPCODE).
+  bool isTargetMemoryOpcode() const {
+    return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
+  }
+
+  /// Return true if the node's opcode is ISD::UNDEF.
+  bool isUndef() const { return NodeType == ISD::UNDEF; }
+
+  /// Test if this node is a memory intrinsic (with valid pointer information).
+  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
+  /// non-memory intrinsics (with chains) that are not really instances of
+  /// MemSDNode. For such nodes, we need some extra state to determine the
+  /// proper classof relationship.
+  bool isMemIntrinsic() const {
+    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
+            NodeType == ISD::INTRINSIC_VOID) &&
+           SDNodeBits.IsMemIntrinsic;
+  }
+
+  /// Test if this node is a strict floating point pseudo-op.
+  bool isStrictFPOpcode() {
+    switch (NodeType) {
+      default:
+        return false;
+      case ISD::STRICT_FADD:
+      case ISD::STRICT_FSUB:
+      case ISD::STRICT_FMUL:
+      case ISD::STRICT_FDIV:
+      case ISD::STRICT_FREM:
+      case ISD::STRICT_FMA:
+      case ISD::STRICT_FSQRT:
+      case ISD::STRICT_FPOW:
+      case ISD::STRICT_FPOWI:
+      case ISD::STRICT_FSIN:
+      case ISD::STRICT_FCOS:
+      case ISD::STRICT_FEXP:
+      case ISD::STRICT_FEXP2:
+      case ISD::STRICT_FLOG:
+      case ISD::STRICT_FLOG10:
+      case ISD::STRICT_FLOG2:
+      case ISD::STRICT_FRINT:
+      case ISD::STRICT_FNEARBYINT:
+        return true;
+    }
+  }
+
+  /// Test if this node has a post-isel opcode, directly
+  /// corresponding to a MachineInstr opcode.
+  bool isMachineOpcode() const { return NodeType < 0; }
+
+  /// This may only be called if isMachineOpcode returns
+  /// true. It returns the MachineInstr opcode value that the node's opcode
+  /// corresponds to.
+  unsigned getMachineOpcode() const {
+    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
+    return ~NodeType;
+  }
+
+  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
+  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
+
+  bool isDivergent() const { return SDNodeBits.IsDivergent; }
+
+  /// Return true if there are no uses of this node.
+  bool use_empty() const { return UseList == nullptr; }
+
+  /// Return true if there is exactly one use of this node.
+  bool hasOneUse() const {
+    return !use_empty() && std::next(use_begin()) == use_end();
+  }
+
+  /// Return the number of uses of this node. This method takes
+  /// time proportional to the number of uses.
+  size_t use_size() const { return std::distance(use_begin(), use_end()); }
+
+  /// Return the unique node id.
+  int getNodeId() const { return NodeId; }
+
+  /// Set unique node id.
+  void setNodeId(int Id) { NodeId = Id; }
+
+  /// Return the node ordering.
+  unsigned getIROrder() const { return IROrder; }
+
+  /// Set the node ordering.
+  void setIROrder(unsigned Order) { IROrder = Order; }
+
+  /// Return the source location info.
+  const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+  /// Set source location info.  Try to avoid this, putting
+  /// it in the constructor is preferable.
+  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
+
+  /// This class provides iterator support for SDUse
+  /// operands that use a specific SDNode.
+  class use_iterator
+    : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
+    friend class SDNode;
+
+    SDUse *Op = nullptr;
+
+    explicit use_iterator(SDUse *op) : Op(op) {}
+
+  public:
+    using reference = std::iterator<std::forward_iterator_tag,
+                                    SDUse, ptrdiff_t>::reference;
+    using pointer = std::iterator<std::forward_iterator_tag,
+                                  SDUse, ptrdiff_t>::pointer;
+
+    use_iterator() = default;
+    use_iterator(const use_iterator &I) : Op(I.Op) {}
+
+    bool operator==(const use_iterator &x) const {
+      return Op == x.Op;
+    }
+    bool operator!=(const use_iterator &x) const {
+      return !operator==(x);
+    }
+
+    /// Return true if this iterator is at the end of the uses list.
+    bool atEnd() const { return Op == nullptr; }
+
+    // Iterator traversal: forward iteration only.
+    use_iterator &operator++() {          // Preincrement
+      assert(Op && "Cannot increment end iterator!");
+      Op = Op->getNext();
+      return *this;
+    }
+
+    use_iterator operator++(int) {        // Postincrement
+      use_iterator tmp = *this; ++*this; return tmp;
+    }
+
+    /// Retrieve a pointer to the current user node.
+    SDNode *operator*() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return Op->getUser();
+    }
+
+    SDNode *operator->() const { return operator*(); }
+
+    SDUse &getUse() const { return *Op; }
+
+    /// Retrieve the operand # of this use in its user.
+    unsigned getOperandNo() const {
+      assert(Op && "Cannot dereference end iterator!");
+      return (unsigned)(Op - Op->getUser()->OperandList);
+    }
+  };
+
+  /// Provide iteration support to walk over all uses of an SDNode.
+  use_iterator use_begin() const {
+    return use_iterator(UseList);
+  }
+
+  static use_iterator use_end() { return use_iterator(nullptr); }
+
+  inline iterator_range<use_iterator> uses() {
+    return make_range(use_begin(), use_end());
+  }
+  inline iterator_range<use_iterator> uses() const {
+    return make_range(use_begin(), use_end());
+  }
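+
+  // Example (illustrative): walking every user of a node and filtering on
+  // which result each use reads ('N' is assumed to be an SDNode*):
+  //   for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
+  //        UI != UE; ++UI)
+  //     if (UI.getUse().getResNo() == 0)
+  //       ... // *UI is the SDNode using result 0 of N.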
+
+  /// Return true if there are exactly NUSES uses of the indicated value.
+  /// This method ignores uses of other values defined by this operation.
+  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
+
+  /// Return true if there are any uses of the indicated value.
+  /// This method ignores uses of other values defined by this operation.
+  bool hasAnyUseOfValue(unsigned Value) const;
+
+  /// Return true if this node is the only use of N.
+  bool isOnlyUserOf(const SDNode *N) const;
+
+  /// Return true if this node is an operand of N.
+  bool isOperandOf(const SDNode *N) const;
+
+  /// Return true if this node is a predecessor of N.
+  /// NOTE: Implemented on top of hasPredecessor and every bit as
+  /// expensive. Use carefully.
+  bool isPredecessorOf(const SDNode *N) const {
+    return N->hasPredecessor(this);
+  }
+
+  /// Return true if N is a predecessor of this node.
+  /// N is either an operand of this node, or can be reached by recursively
+  /// traversing up the operands.
+  /// NOTE: This is an expensive method. Use it carefully.
+  bool hasPredecessor(const SDNode *N) const;
+
+  /// Returns true if N is a predecessor of any node in Worklist. This
+  /// helper keeps the Visited and Worklist sets external so that searches
+  /// can be unioned across calls, results can be cached across queries, and
+  /// nodes can be added to Worklist incrementally. The search stops early
+  /// once N is found but can be resumed later. Remember to clear Visited
+  /// and Worklist if the DAG changes. MaxSteps gives a maximum number of
+  /// nodes to visit before
+  /// giving up. The TopologicalPrune flag signals that positive NodeIds are
+  /// topologically ordered (Operands have strictly smaller node id) and search
+  /// can be pruned leveraging this.
+  static bool hasPredecessorHelper(const SDNode *N,
+                                   SmallPtrSetImpl<const SDNode *> &Visited,
+                                   SmallVectorImpl<const SDNode *> &Worklist,
+                                   unsigned int MaxSteps = 0,
+                                   bool TopologicalPrune = false) {
+    SmallVector<const SDNode *, 8> DeferredNodes;
+    if (Visited.count(N))
+      return true;
+
+    // Node Ids are assigned in three places: as a topological
+    // ordering (> 0), during legalization (values set to 0), and
+    // for new nodes (set to -1). If N has a topological id then we
+    // know that all nodes with ids smaller than it cannot be
+    // successors, so we need not check them. Filter out all nodes
+    // that cannot match, but add them back to the worklist before
+    // exiting in case of multiple calls. Note that the topological order
+    // may be violated during selection if a node's predecessor is selected
+    // before it. We mark this during selection by negating the ids of
+    // unselected successors and by restricting topological pruning to
+    // positive ids.
+
+    int NId = N->getNodeId();
+    // If the Id was invalidated, reconstruct the original NId.
+    if (NId < -1)
+      NId = -(NId + 1);
+
+    bool Found = false;
+    while (!Worklist.empty()) {
+      const SDNode *M = Worklist.pop_back_val();
+      int MId = M->getNodeId();
+      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
+          (MId > 0) && (MId < NId)) {
+        DeferredNodes.push_back(M);
+        continue;
+      }
+      for (const SDValue &OpV : M->op_values()) {
+        SDNode *Op = OpV.getNode();
+        if (Visited.insert(Op).second)
+          Worklist.push_back(Op);
+        if (Op == N)
+          Found = true;
+      }
+      if (Found)
+        break;
+      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
+        break;
+    }
+    // Push deferred nodes back on worklist.
+    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
+    // If we bailed early, conservatively assume N was found.
+    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
+      return true;
+    return Found;
+  }
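+
+  // Illustrative sketch ('N' and 'User' are assumed names): the cycle check
+  // a DAG combine performs before folding N into one of its users:
+  //   SmallPtrSet<const SDNode *, 16> Visited;
+  //   SmallVector<const SDNode *, 8> Worklist;
+  //   Worklist.push_back(User);
+  //   if (SDNode::hasPredecessorHelper(N, Visited, Worklist))
+  //     return false; // User depends on N; folding would create a cycle.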
+
+  /// Return true if all the users of N are contained in Nodes.
+  /// NOTE: N must have at least one user in Nodes, but not every node in
+  /// Nodes needs to use N.
+  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
+
+  /// Return the number of values used by this operation.
+  unsigned getNumOperands() const { return NumOperands; }
+
+  /// Helper method returns the integer value of a ConstantSDNode operand.
+  inline uint64_t getConstantOperandVal(unsigned Num) const;
+
+  const SDValue &getOperand(unsigned Num) const {
+    assert(Num < NumOperands && "Invalid child # of SDNode!");
+    return OperandList[Num];
+  }
+
+  using op_iterator = SDUse *;
+
+  op_iterator op_begin() const { return OperandList; }
+  op_iterator op_end() const { return OperandList+NumOperands; }
+  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
+
+  /// Iterator for directly iterating over the operand SDValue's.
+  struct value_op_iterator
+      : iterator_adaptor_base<value_op_iterator, op_iterator,
+                              std::random_access_iterator_tag, SDValue,
+                              ptrdiff_t, value_op_iterator *,
+                              value_op_iterator *> {
+    explicit value_op_iterator(SDUse *U = nullptr)
+      : iterator_adaptor_base(U) {}
+
+    const SDValue &operator*() const { return I->get(); }
+  };
+
+  iterator_range<value_op_iterator> op_values() const {
+    return make_range(value_op_iterator(op_begin()),
+                      value_op_iterator(op_end()));
+  }
+
+  SDVTList getVTList() const {
+    SDVTList X = { ValueList, NumValues };
+    return X;
+  }
+
+  /// If this node has a glue operand, return the node
+  /// to which the glue operand points. Otherwise return NULL.
+  SDNode *getGluedNode() const {
+    if (getNumOperands() != 0 &&
+        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
+      return getOperand(getNumOperands()-1).getNode();
+    return nullptr;
+  }
+
+  /// If this node has a glue value with a user, return
+  /// the user (there is at most one). Otherwise return NULL.
+  SDNode *getGluedUser() const {
+    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+      if (UI.getUse().get().getValueType() == MVT::Glue)
+        return *UI;
+    return nullptr;
+  }
+
+  const SDNodeFlags getFlags() const { return Flags; }
+  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
+
+  /// Clear any flags in this node that aren't also set in Flags.
+  /// If Flags is not in a defined state then this has no effect.
+  void intersectFlagsWith(const SDNodeFlags Flags);
+
+  /// Return the number of values defined/returned by this operator.
+  unsigned getNumValues() const { return NumValues; }
+
+  /// Return the type of a specified result.
+  EVT getValueType(unsigned ResNo) const {
+    assert(ResNo < NumValues && "Illegal result number!");
+    return ValueList[ResNo];
+  }
+
+  /// Return the type of a specified result as a simple type.
+  MVT getSimpleValueType(unsigned ResNo) const {
+    return getValueType(ResNo).getSimpleVT();
+  }
+
+  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
+  unsigned getValueSizeInBits(unsigned ResNo) const {
+    return getValueType(ResNo).getSizeInBits();
+  }
+
+  using value_iterator = const EVT *;
+
+  value_iterator value_begin() const { return ValueList; }
+  value_iterator value_end() const { return ValueList+NumValues; }
+
+  /// Return the opcode of this operation for printing.
+  std::string getOperationName(const SelectionDAG *G = nullptr) const;
+  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
+  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
+  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
+  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+
+  /// Print a SelectionDAG node and all children down to
+  /// the leaves.  The given SelectionDAG allows target-specific nodes
+  /// to be printed in human-readable form.  Unlike printr, this will
+  /// print the whole DAG, including children that appear multiple
+  /// times.
+  ///
+  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
+
+  /// Print a SelectionDAG node and children up to
+  /// depth "depth."  The given SelectionDAG allows target-specific
+  /// nodes to be printed in human-readable form.  Unlike printr, this
+  /// will print children that appear multiple times wherever they are
+  /// used.
+  ///
+  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
+                       unsigned depth = 100) const;
+
+  /// Dump this node, for debugging.
+  void dump() const;
+
+  /// Dump (recursively) this node and its use-def subgraph.
+  void dumpr() const;
+
+  /// Dump this node, for debugging.
+  /// The given SelectionDAG allows target-specific nodes to be printed
+  /// in human-readable form.
+  void dump(const SelectionDAG *G) const;
+
+  /// Dump (recursively) this node and its use-def subgraph.
+  /// The given SelectionDAG allows target-specific nodes to be printed
+  /// in human-readable form.
+  void dumpr(const SelectionDAG *G) const;
+
+  /// printrFull to dbgs().  The given SelectionDAG allows
+  /// target-specific nodes to be printed in human-readable form.
+  /// Unlike dumpr, this will print the whole DAG, including children
+  /// that appear multiple times.
+  void dumprFull(const SelectionDAG *G = nullptr) const;
+
+  /// printrWithDepth to dbgs().  The given
+  /// SelectionDAG allows target-specific nodes to be printed in
+  /// human-readable form.  Unlike dumpr, this will print children
+  /// that appear multiple times wherever they are used.
+  ///
+  void dumprWithDepth(const SelectionDAG *G = nullptr,
+                      unsigned depth = 100) const;
+
+  /// Gather unique data for the node.
+  void Profile(FoldingSetNodeID &ID) const;
+
+  /// This method should only be used by the SDUse class.
+  void addUse(SDUse &U) { U.addToList(&UseList); }
+
+protected:
+  static SDVTList getSDVTList(EVT VT) {
+    SDVTList Ret = { getValueTypeList(VT), 1 };
+    return Ret;
+  }
+
+  /// Create an SDNode.
+  ///
+  /// SDNodes are created without any operands, and never own the operand
+  /// storage. To add operands, see SelectionDAG::createOperands.
+  SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
+      : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
+        IROrder(Order), debugLoc(std::move(dl)) {
+    memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
+    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+    assert(NumValues == VTs.NumVTs &&
+           "NumValues wasn't wide enough for its operands!");
+  }
+
+  /// Release the operands and set this node to have zero operands.
+  void DropOperands();
+};
+
+/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
+/// into SDNode creation functions.
+/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
+/// from the original Instruction, and IROrder is the ordinal position of
+/// the instruction.
+/// When an SDNode is created after the DAG is being built, both DebugLoc and
+/// the IROrder are propagated from the original SDNode.
+/// The SDLoc class therefore provides two constructors besides the default
+/// one: one for use by the DAGBuilder and one for use by everyone else.
+class SDLoc {
+private:
+  DebugLoc DL;
+  int IROrder = 0;
+
+public:
+  SDLoc() = default;
+  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
+  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
+  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
+    assert(Order >= 0 && "bad IROrder");
+    if (I)
+      DL = I->getDebugLoc();
+  }
+
+  unsigned getIROrder() const { return IROrder; }
+  const DebugLoc &getDebugLoc() const { return DL; }
+};
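+
+// Example (illustrative): an SDLoc is normally constructed from the node
+// being combined or replaced, so newly created nodes inherit its DebugLoc
+// and IR order ('N', 'DAG', 'VT' and 'Op' are assumed to be in scope):
+//   SDLoc DL(N);
+//   SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Op);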
+
+// Define inline functions from the SDValue class.
+
+inline SDValue::SDValue(SDNode *node, unsigned resno)
+    : Node(node), ResNo(resno) {
+  // Explicitly check for !ResNo to avoid use-after-free, because there are
+  // callers that use SDValue(N, 0) with a deleted N to indicate successful
+  // combines.
+  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
+         "Invalid result number for the given node!");
+  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
+}
+
+inline unsigned SDValue::getOpcode() const {
+  return Node->getOpcode();
+}
+
+inline EVT SDValue::getValueType() const {
+  return Node->getValueType(ResNo);
+}
+
+inline unsigned SDValue::getNumOperands() const {
+  return Node->getNumOperands();
+}
+
+inline const SDValue &SDValue::getOperand(unsigned i) const {
+  return Node->getOperand(i);
+}
+
+inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
+  return Node->getConstantOperandVal(i);
+}
+
+inline bool SDValue::isTargetOpcode() const {
+  return Node->isTargetOpcode();
+}
+
+inline bool SDValue::isTargetMemoryOpcode() const {
+  return Node->isTargetMemoryOpcode();
+}
+
+inline bool SDValue::isMachineOpcode() const {
+  return Node->isMachineOpcode();
+}
+
+inline unsigned SDValue::getMachineOpcode() const {
+  return Node->getMachineOpcode();
+}
+
+inline bool SDValue::isUndef() const {
+  return Node->isUndef();
+}
+
+inline bool SDValue::use_empty() const {
+  return !Node->hasAnyUseOfValue(ResNo);
+}
+
+inline bool SDValue::hasOneUse() const {
+  return Node->hasNUsesOfValue(1, ResNo);
+}
+
+inline const DebugLoc &SDValue::getDebugLoc() const {
+  return Node->getDebugLoc();
+}
+
+inline void SDValue::dump() const {
+  return Node->dump();
+}
+
+inline void SDValue::dump(const SelectionDAG *G) const {
+  return Node->dump(G);
+}
+
+inline void SDValue::dumpr() const {
+  return Node->dumpr();
+}
+
+inline void SDValue::dumpr(const SelectionDAG *G) const {
+  return Node->dumpr(G);
+}
+
+// Define inline functions from the SDUse class.
+
+inline void SDUse::set(const SDValue &V) {
+  if (Val.getNode()) removeFromList();
+  Val = V;
+  if (V.getNode()) V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setInitial(const SDValue &V) {
+  Val = V;
+  V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setNode(SDNode *N) {
+  if (Val.getNode()) removeFromList();
+  Val.setNode(N);
+  if (N) N->addUse(*this);
+}
+
+/// This class is used to form a handle around another node that
+/// is persistent and is updated across invocations of replaceAllUsesWith on its
+/// operand.  This node should be directly created by end-users and not added to
+/// the AllNodes list.
+class HandleSDNode : public SDNode {
+  SDUse Op;
+
+public:
+  explicit HandleSDNode(SDValue X)
+    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
+    // HandleSDNodes are never inserted into the DAG, so they won't be
+    // auto-numbered. Use ID 65535 as a sentinel.
+    PersistentId = 0xffff;
+
+    // Manually set up the operand list. This node type is special in that it's
+    // always stack allocated and SelectionDAG does not manage its operands.
+    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
+    // be so special.
+    Op.setUser(this);
+    Op.setInitial(X);
+    NumOperands = 1;
+    OperandList = &Op;
+  }
+  ~HandleSDNode();
+
+  const SDValue &getValue() const { return Op; }
+};
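+
+// Example (illustrative): a stack-allocated HandleSDNode keeps a value alive
+// and tracks it across code that may call ReplaceAllUsesWith on its node
+// ('Val' is assumed to be an SDValue in scope):
+//   HandleSDNode Handle(Val);
+//   ... run legalization/combine steps that may replace Val's node ...
+//   Val = Handle.getValue(); // re-read the (possibly updated) value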
+
+class AddrSpaceCastSDNode : public SDNode {
+private:
+  unsigned SrcAddrSpace;
+  unsigned DestAddrSpace;
+
+public:
+  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
+                      unsigned SrcAS, unsigned DestAS);
+
+  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
+  unsigned getDestAddressSpace() const { return DestAddrSpace; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ADDRSPACECAST;
+  }
+};
+
+/// This is an abstract virtual class for memory operations.
+class MemSDNode : public SDNode {
+private:
+  // VT of in-memory value.
+  EVT MemoryVT;
+
+protected:
+  /// Memory reference information.
+  MachineMemOperand *MMO;
+
+public:
+  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
+            EVT MemoryVT, MachineMemOperand *MMO);
+
+  bool readMem() const { return MMO->isLoad(); }
+  bool writeMem() const { return MMO->isStore(); }
+
+  /// Returns the alignment of the memory access.
+  unsigned getOriginalAlignment() const {
+    return MMO->getBaseAlignment();
+  }
+  unsigned getAlignment() const {
+    return MMO->getAlignment();
+  }
+
+  /// Return the SubclassData value, without HasDebugValue. This contains an
+  /// encoding of the volatile flag, as well as bits used by subclasses. This
+  /// function should only be used to compute a FoldingSetNodeID value.
+  /// The HasDebugValue bit is masked out because the CSE map needs to match
+  /// nodes with debug info against nodes without debug info. The same applies
+  /// to the IsDivergent bit.
+  unsigned getRawSubclassData() const {
+    uint16_t Data;
+    union {
+      char RawSDNodeBits[sizeof(uint16_t)];
+      SDNodeBitfields SDNodeBits;
+    };
+    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
+    SDNodeBits.HasDebugValue = 0;
+    SDNodeBits.IsDivergent = false;
+    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
+    return Data;
+  }
+
+  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
+  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
+  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
+  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
+
+  // Returns the offset from the location of the access.
+  int64_t getSrcValueOffset() const { return MMO->getOffset(); }
+
+  /// Returns the AA info that describes the dereference.
+  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
+
+  /// Returns the Ranges that describes the dereference.
+  const MDNode *getRanges() const { return MMO->getRanges(); }
+
+  /// Returns the synchronization scope ID for this memory operation.
+  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
+
+  /// Return the atomic ordering requirements for this memory operation. For
+  /// cmpxchg atomic operations, return the atomic ordering requirements when
+  /// store occurs.
+  AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
+
+  /// Return the type of the in-memory value.
+  EVT getMemoryVT() const { return MemoryVT; }
+
+  /// Return a MachineMemOperand object describing the memory
+  /// reference performed by operation.
+  MachineMemOperand *getMemOperand() const { return MMO; }
+
+  const MachinePointerInfo &getPointerInfo() const {
+    return MMO->getPointerInfo();
+  }
+
+  /// Return the address space for the associated pointer
+  unsigned getAddressSpace() const {
+    return getPointerInfo().getAddrSpace();
+  }
+
+  /// Update this MemSDNode's MachineMemOperand information
+  /// to reflect the alignment of NewMMO, if it has a greater alignment.
+  /// This must only be used when the new alignment applies to all users of
+  /// this MachineMemOperand.
+  void refineAlignment(const MachineMemOperand *NewMMO) {
+    MMO->refineAlignment(NewMMO);
+  }
+
+  const SDValue &getChain() const { return getOperand(0); }
+  const SDValue &getBasePtr() const {
+    return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
+    // with either an intrinsic or a target opcode.
+    return N->getOpcode() == ISD::LOAD                ||
+           N->getOpcode() == ISD::STORE               ||
+           N->getOpcode() == ISD::PREFETCH            ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+           N->getOpcode() == ISD::ATOMIC_SWAP         ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD         ||
+           N->getOpcode() == ISD::ATOMIC_STORE        ||
+           N->getOpcode() == ISD::MLOAD               ||
+           N->getOpcode() == ISD::MSTORE              ||
+           N->getOpcode() == ISD::MGATHER             ||
+           N->getOpcode() == ISD::MSCATTER            ||
+           N->isMemIntrinsic()                        ||
+           N->isTargetMemoryOpcode();
+  }
+};
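+
+// Example (illustrative): the memory properties of any of the nodes matched
+// above can be queried uniformly through the MemSDNode interface ('N' is
+// assumed to be an SDNode*):
+//   if (auto *M = dyn_cast<MemSDNode>(N)) {
+//     unsigned Alignment = M->getAlignment();
+//     EVT MemVT = M->getMemoryVT();
+//   }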
+
+/// This is an SDNode representing atomic operations.
+class AtomicSDNode : public MemSDNode {
+public:
+  AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
+               EVT MemVT, MachineMemOperand *MMO)
+      : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {}
+
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getVal() const { return getOperand(2); }
+
+  /// Returns true if this SDNode represents cmpxchg atomic operation, false
+  /// otherwise.
+  bool isCompareAndSwap() const {
+    unsigned Op = getOpcode();
+    return Op == ISD::ATOMIC_CMP_SWAP ||
+           Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
+  }
+
+  /// For cmpxchg atomic operations, return the atomic ordering requirements
+  /// when store does not occur.
+  AtomicOrdering getFailureOrdering() const {
+    assert(isCompareAndSwap() && "Must be cmpxchg operation");
+    return MMO->getFailureOrdering();
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
+           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+           N->getOpcode() == ISD::ATOMIC_SWAP         ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
+           N->getOpcode() == ISD::ATOMIC_LOAD         ||
+           N->getOpcode() == ISD::ATOMIC_STORE;
+  }
+};
+
+/// This SDNode is used for target intrinsics that touch
+/// memory and need an associated MachineMemOperand. Its opcode may be
+/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
+/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
+class MemIntrinsicSDNode : public MemSDNode {
+public:
+  MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
+                     SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
+      : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
+    SDNodeBits.IsMemIntrinsic = true;
+  }
+
+  // Methods to support isa and dyn_cast
+  static bool classof(const SDNode *N) {
+    // We lower some target intrinsics to their target opcode
+    // early, so a node with a target opcode can be of this class.
+    return N->isMemIntrinsic()             ||
+           N->getOpcode() == ISD::PREFETCH ||
+           N->isTargetMemoryOpcode();
+  }
+};
+
+/// This SDNode is used to implement the code generator
+/// support for the llvm IR shufflevector instruction.  It combines elements
+/// from two input vectors into a new input vector, with the selection and
+/// ordering of elements determined by an array of integers, referred to as
+/// the shuffle mask.  For input vectors of width N, mask indices of 0..N-1
+/// refer to elements from the LHS input, and indices N to 2N-1 to the RHS.
+/// An index of -1 is treated as undef, such that the code generator may put
+/// any value in the corresponding element of the result.
+class ShuffleVectorSDNode : public SDNode {
+  // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
+  // is freed when the SelectionDAG object is destroyed.
+  const int *Mask;
+
+protected:
+  friend class SelectionDAG;
+
+  ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
+      : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
+
+public:
+  ArrayRef<int> getMask() const {
+    EVT VT = getValueType(0);
+    return makeArrayRef(Mask, VT.getVectorNumElements());
+  }
+
+  int getMaskElt(unsigned Idx) const {
+    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
+    return Mask[Idx];
+  }
+
+  bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
+
+  int  getSplatIndex() const {
+    assert(isSplat() && "Cannot get splat index for non-splat!");
+    EVT VT = getValueType(0);
+    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+      if (Mask[i] >= 0)
+        return Mask[i];
+    }
+    llvm_unreachable("Splat with all undef indices?");
+  }
+
+  static bool isSplatMask(const int *Mask, EVT VT);
+
+  /// Change values in a shuffle permute mask assuming
+  /// the two vector operands have swapped position.
+  static void commuteMask(MutableArrayRef<int> Mask) {
+    unsigned NumElems = Mask.size();
+    for (unsigned i = 0; i != NumElems; ++i) {
+      int idx = Mask[i];
+      if (idx < 0)
+        continue;
+      else if (idx < (int)NumElems)
+        Mask[i] = idx + NumElems;
+      else
+        Mask[i] = idx - NumElems;
+    }
+  }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::VECTOR_SHUFFLE;
+  }
+};
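+
+// Example (illustrative): commuting the 4-element shuffle mask <0,5,2,7>
+// (lanes taken from LHS,RHS,LHS,RHS) yields <4,1,6,3>, the same selection
+// with the two operands swapped:
+//   int Mask[] = {0, 5, 2, 7};
+//   ShuffleVectorSDNode::commuteMask(Mask); // Mask is now {4, 1, 6, 3}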
+
+class ConstantSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const ConstantInt *Value;
+
+  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
+                 const DebugLoc &DL, EVT VT)
+      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DL,
+               getSDVTList(VT)),
+        Value(val) {
+    ConstantSDNodeBits.IsOpaque = isOpaque;
+  }
+
+public:
+  const ConstantInt *getConstantIntValue() const { return Value; }
+  const APInt &getAPIntValue() const { return Value->getValue(); }
+  uint64_t getZExtValue() const { return Value->getZExtValue(); }
+  int64_t getSExtValue() const { return Value->getSExtValue(); }
+  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
+    return Value->getLimitedValue(Limit);
+  }
+
+  bool isOne() const { return Value->isOne(); }
+  bool isNullValue() const { return Value->isZero(); }
+  bool isAllOnesValue() const { return Value->isMinusOne(); }
+
+  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::Constant ||
+           N->getOpcode() == ISD::TargetConstant;
+  }
+};
+
+inline uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
+  return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
+}
+
+class ConstantFPSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const ConstantFP *Value;
+
+  ConstantFPSDNode(bool isTarget, const ConstantFP *val, const DebugLoc &DL,
+                   EVT VT)
+      : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0, DL,
+               getSDVTList(VT)),
+        Value(val) {}
+
+public:
+  const APFloat& getValueAPF() const { return Value->getValueAPF(); }
+  const ConstantFP *getConstantFPValue() const { return Value; }
+
+  /// Return true if the value is positive or negative zero.
+  bool isZero() const { return Value->isZero(); }
+
+  /// Return true if the value is a NaN.
+  bool isNaN() const { return Value->isNaN(); }
+
+  /// Return true if the value is an infinity
+  bool isInfinity() const { return Value->isInfinity(); }
+
+  /// Return true if the value is negative.
+  bool isNegative() const { return Value->isNegative(); }
+
+  /// We don't rely on operator== working on double values, as
+  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+  /// As such, this method can be used to do an exact bit-for-bit comparison of
+  /// two floating point values.
+  ///
+  /// We leave the version with the double argument here because it's just so
+  /// convenient to write "2.0" and the like.  Without this function we'd
+  /// have to duplicate its logic everywhere it's called.
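+  /// For example, isExactlyValue(0.0) returns false for a node holding -0.0,
+  /// even though -0.0 == 0.0 compares true for doubles.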
+  bool isExactlyValue(double V) const {
+    return Value->getValueAPF().isExactlyValue(V);
+  }
+  bool isExactlyValue(const APFloat& V) const;
+
+  static bool isValueValidForType(EVT VT, const APFloat& Val);
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ConstantFP ||
+           N->getOpcode() == ISD::TargetConstantFP;
+  }
+};
+
+/// Returns true if \p V is a constant integer zero.
+bool isNullConstant(SDValue V);
+
+/// Returns true if \p V is an FP constant with a value of positive zero.
+bool isNullFPConstant(SDValue V);
+
+/// Returns true if \p V is an integer constant with all bits set.
+bool isAllOnesConstant(SDValue V);
+
+/// Returns true if \p V is a constant integer one.
+bool isOneConstant(SDValue V);
+
+/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
+/// constant is canonicalized to be operand 1.
+bool isBitwiseNot(SDValue V);
+
+/// Returns the SDNode if it is a constant splat BuildVector or constant int.
+ConstantSDNode *isConstOrConstSplat(SDValue V);
+
+/// Returns the SDNode if it is a constant splat BuildVector or constant float.
+ConstantFPSDNode *isConstOrConstSplatFP(SDValue V);
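+
+// For illustration, a caller might use these helpers like this (Op is an
+// SDValue assumed to come from elsewhere in the DAG):
+//
+//   if (ConstantSDNode *C = isConstOrConstSplat(Op))
+//     if (C->getAPIntValue().isPowerOf2())
+//       ...; // e.g. replace a multiply with a shift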
+
+class GlobalAddressSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const GlobalValue *TheGlobal;
+  int64_t Offset;
+  unsigned char TargetFlags;
+
+  GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
+                      const GlobalValue *GA, EVT VT, int64_t o,
+                      unsigned char TargetFlags);
+
+public:
+  const GlobalValue *getGlobal() const { return TheGlobal; }
+  int64_t getOffset() const { return Offset; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+  // Return the address space this GlobalAddress belongs to.
+  unsigned getAddressSpace() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::GlobalAddress ||
+           N->getOpcode() == ISD::TargetGlobalAddress ||
+           N->getOpcode() == ISD::GlobalTLSAddress ||
+           N->getOpcode() == ISD::TargetGlobalTLSAddress;
+  }
+};
+
+class FrameIndexSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  int FI;
+
+  FrameIndexSDNode(int fi, EVT VT, bool isTarg)
+    : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
+      0, DebugLoc(), getSDVTList(VT)), FI(fi) {
+  }
+
+public:
+  int getIndex() const { return FI; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::FrameIndex ||
+           N->getOpcode() == ISD::TargetFrameIndex;
+  }
+};
+
+class JumpTableSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  int JTI;
+  unsigned char TargetFlags;
+
+  JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
+    : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
+      0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
+  }
+
+public:
+  int getIndex() const { return JTI; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::JumpTable ||
+           N->getOpcode() == ISD::TargetJumpTable;
+  }
+};
+
+class ConstantPoolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  union {
+    const Constant *ConstVal;
+    MachineConstantPoolValue *MachineCPVal;
+  } Val;
+  int Offset;  // It's a MachineConstantPoolValue if top bit is set.
+  unsigned Alignment;  // Minimum alignment requirement of CP (not log2 value).
+  unsigned char TargetFlags;
+
+  ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
+                     unsigned Align, unsigned char TF)
+    : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+             DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+             TargetFlags(TF) {
+    assert(Offset >= 0 && "Offset is too large");
+    Val.ConstVal = c;
+  }
+
+  ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
+                     EVT VT, int o, unsigned Align, unsigned char TF)
+    : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+             DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+             TargetFlags(TF) {
+    assert(Offset >= 0 && "Offset is too large");
+    Val.MachineCPVal = v;
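+    // Tag this entry as a MachineConstantPoolValue by setting the top bit of
+    // Offset; isMachineConstantPoolEntry() below tests it via Offset < 0.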
+    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+  }
+
+public:
+  bool isMachineConstantPoolEntry() const {
+    return Offset < 0;
+  }
+
+  const Constant *getConstVal() const {
+    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
+    return Val.ConstVal;
+  }
+
+  MachineConstantPoolValue *getMachineCPVal() const {
+    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
+    return Val.MachineCPVal;
+  }
+
+  int getOffset() const {
+    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
+  }
+
+  // Return the alignment of this constant pool object, which is either 0 (for
+  // default alignment) or the desired value.
+  unsigned getAlignment() const { return Alignment; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  Type *getType() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ConstantPool ||
+           N->getOpcode() == ISD::TargetConstantPool;
+  }
+};
+
+/// Completely target-dependent object reference.
+class TargetIndexSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  unsigned char TargetFlags;
+  int Index;
+  int64_t Offset;
+
+public:
+  TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
+    : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
+      TargetFlags(TF), Index(Idx), Offset(Ofs) {}
+
+  unsigned char getTargetFlags() const { return TargetFlags; }
+  int getIndex() const { return Index; }
+  int64_t getOffset() const { return Offset; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::TargetIndex;
+  }
+};
+
+class BasicBlockSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MachineBasicBlock *MBB;
+
+  /// Debug info is meaningful and potentially useful here, but we create
+  /// blocks out of order when they're jumped to, which makes it a bit
+  /// harder.  Let's see if we need it first.
+  explicit BasicBlockSDNode(MachineBasicBlock *mbb)
+    : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
+  {}
+
+public:
+  MachineBasicBlock *getBasicBlock() const { return MBB; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BasicBlock;
+  }
+};
+
+/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
+class BuildVectorSDNode : public SDNode {
+public:
+  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
+  explicit BuildVectorSDNode() = delete;
+
+  /// Check if this is a constant splat, and if so, find the
+  /// smallest element size that splats the vector.  If MinSplatBits is
+  /// nonzero, the element size must be at least that large.  Note that the
+  /// splat element may be the entire vector (i.e., a one element vector).
+  /// Returns the splat element value in SplatValue.  Any undefined bits in
+  /// that value are zero, and the corresponding bits in the SplatUndef mask
+  /// are set.  The SplatBitSize value is set to the splat element size in
+  /// bits.  HasAnyUndefs is set to true if any bits in the vector are
+  /// undefined.  isBigEndian describes the endianness of the target.
+  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
+                       unsigned &SplatBitSize, bool &HasAnyUndefs,
+                       unsigned MinSplatBits = 0,
+                       bool isBigEndian = false) const;
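+  //
+  // For illustration (BV is assumed to be a BuildVectorSDNode*):
+  //
+  //   APInt SplatValue, SplatUndef;
+  //   unsigned SplatBitSize;
+  //   bool HasAnyUndefs;
+  //   if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+  //                           HasAnyUndefs, /*MinSplatBits=*/32))
+  //     ...; // SplatValue holds the splatted bit pattern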
+
+  /// \brief Returns the splatted value or a null value if this is not a splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
+
+  /// \brief Returns the splatted constant or null if this is not a constant
+  /// splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  ConstantSDNode *
+  getConstantSplatNode(BitVector *UndefElements = nullptr) const;
+
+  /// \brief Returns the splatted constant FP or null if this is not a constant
+  /// FP splat.
+  ///
+  /// If passed a non-null UndefElements bitvector, it will resize it to match
+  /// the vector width and set the bits where elements are undef.
+  ConstantFPSDNode *
+  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
+
+  /// \brief If this is a constant FP splat and the splatted constant FP is an
+  /// exact power or 2, return the log base 2 integer value.  Otherwise,
+  /// return -1.
+  ///
+  /// The BitWidth specifies the necessary bit precision.
+  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
+                                          uint32_t BitWidth) const;
+
+  bool isConstant() const;
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BUILD_VECTOR;
+  }
+};
+
+/// An SDNode that holds an arbitrary LLVM IR Value. This is
+/// used when the SelectionDAG needs to make a simple reference to something
+/// in the LLVM IR representation.
+///
+class SrcValueSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const Value *V;
+
+  /// Create a SrcValue for a general value.
+  explicit SrcValueSDNode(const Value *v)
+    : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
+
+public:
+  /// Return the contained Value.
+  const Value *getValue() const { return V; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::SRCVALUE;
+  }
+};
+
+class MDNodeSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const MDNode *MD;
+
+  explicit MDNodeSDNode(const MDNode *md)
+  : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
+  {}
+
+public:
+  const MDNode *getMD() const { return MD; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MDNODE_SDNODE;
+  }
+};
+
+class RegisterSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  unsigned Reg;
+
+  RegisterSDNode(unsigned reg, EVT VT)
+    : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
+
+public:
+  unsigned getReg() const { return Reg; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::Register;
+  }
+};
+
+class RegisterMaskSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  // The memory for RegMask is not owned by the node.
+  const uint32_t *RegMask;
+
+  RegisterMaskSDNode(const uint32_t *mask)
+    : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
+      RegMask(mask) {}
+
+public:
+  const uint32_t *getRegMask() const { return RegMask; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::RegisterMask;
+  }
+};
+
+class BlockAddressSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const BlockAddress *BA;
+  int64_t Offset;
+  unsigned char TargetFlags;
+
+  BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
+                     int64_t o, unsigned char Flags)
+    : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
+             BA(ba), Offset(o), TargetFlags(Flags) {}
+
+public:
+  const BlockAddress *getBlockAddress() const { return BA; }
+  int64_t getOffset() const { return Offset; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::BlockAddress ||
+           N->getOpcode() == ISD::TargetBlockAddress;
+  }
+};
+
+class LabelSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MCSymbol *Label;
+
+  LabelSDNode(unsigned Order, const DebugLoc &dl, MCSymbol *L)
+      : SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {}
+
+public:
+  MCSymbol *getLabel() const { return Label; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::EH_LABEL ||
+           N->getOpcode() == ISD::ANNOTATION_LABEL;
+  }
+};
+
+class ExternalSymbolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  const char *Symbol;
+  unsigned char TargetFlags;
+
+  ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
+    : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
+             0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {}
+
+public:
+  const char *getSymbol() const { return Symbol; }
+  unsigned char getTargetFlags() const { return TargetFlags; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::ExternalSymbol ||
+           N->getOpcode() == ISD::TargetExternalSymbol;
+  }
+};
+
+class MCSymbolSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  MCSymbol *Symbol;
+
+  MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
+      : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
+
+public:
+  MCSymbol *getMCSymbol() const { return Symbol; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MCSymbol;
+  }
+};
+
+class CondCodeSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  ISD::CondCode Condition;
+
+  explicit CondCodeSDNode(ISD::CondCode Cond)
+    : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+      Condition(Cond) {}
+
+public:
+  ISD::CondCode get() const { return Condition; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::CONDCODE;
+  }
+};
+
+/// This class is used to represent EVTs, which are used
+/// to parameterize some operations.
+class VTSDNode : public SDNode {
+  friend class SelectionDAG;
+
+  EVT ValueType;
+
+  explicit VTSDNode(EVT VT)
+    : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+      ValueType(VT) {}
+
+public:
+  EVT getVT() const { return ValueType; }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::VALUETYPE;
+  }
+};
+
+/// Base class for LoadSDNode and StoreSDNode
+class LSBaseSDNode : public MemSDNode {
+public:
+  LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
+               SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
+               MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
+    LSBaseSDNodeBits.AddressingMode = AM;
+    assert(getAddressingMode() == AM && "Value truncated");
+  }
+
+  const SDValue &getOffset() const {
+    return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
+  }
+
+  /// Return the addressing mode for this load or store:
+  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
+  ISD::MemIndexedMode getAddressingMode() const {
+    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
+  }
+
+  /// Return true if this is a pre/post inc/dec load/store.
+  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
+
+  /// Return true if this is NOT a pre/post inc/dec load/store.
+  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
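+
+  // For example, a DAG combine that cannot reason about pre/post-inc forms
+  // would guard on isUnindexed() first (LD is assumed to be an LSBaseSDNode*):
+  //
+  //   if (LD->isUnindexed())
+  //     ...; // getBasePtr() is the complete address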
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::LOAD ||
+           N->getOpcode() == ISD::STORE;
+  }
+};
+
+/// This class is used to represent ISD::LOAD nodes.
+class LoadSDNode : public LSBaseSDNode {
+  friend class SelectionDAG;
+
+  LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+             ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
+             MachineMemOperand *MMO)
+      : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
+    LoadSDNodeBits.ExtTy = ETy;
+    assert(readMem() && "Load MachineMemOperand is not a load!");
+    assert(!writeMem() && "Load MachineMemOperand is a store!");
+  }
+
+public:
+  /// Return whether this is a plain node,
+  /// or one of the varieties of value-extending loads.
+  ISD::LoadExtType getExtensionType() const {
+    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
+  }
+
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getOffset() const { return getOperand(2); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::LOAD;
+  }
+};
+
+/// This class is used to represent ISD::STORE nodes.
+class StoreSDNode : public LSBaseSDNode {
+  friend class SelectionDAG;
+
+  StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+              ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
+              MachineMemOperand *MMO)
+      : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
+    StoreSDNodeBits.IsTruncating = isTrunc;
+    assert(!readMem() && "Store MachineMemOperand is a load!");
+    assert(writeMem() && "Store MachineMemOperand is not a store!");
+  }
+
+public:
+  /// Return true if the op does a truncation before store.
+  /// For integers this is the same as doing a TRUNCATE and storing the result.
+  /// For floats, it is the same as doing an FP_ROUND and storing the result.
+  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+  void setTruncatingStore(bool Truncating) {
+    StoreSDNodeBits.IsTruncating = Truncating;
+  }
+
+  const SDValue &getValue() const { return getOperand(1); }
+  const SDValue &getBasePtr() const { return getOperand(2); }
+  const SDValue &getOffset() const { return getOperand(3); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::STORE;
+  }
+};
+
+/// This base class is used to represent MLOAD and MSTORE nodes
+class MaskedLoadStoreSDNode : public MemSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
+                        const DebugLoc &dl, SDVTList VTs, EVT MemVT,
+                        MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
+
+  // In both nodes the address is Op1 and the mask is Op2:
+  // MaskedLoadSDNode  (Chain, ptr, mask, src0), where src0 is a passthru value
+  // MaskedStoreSDNode (Chain, ptr, mask, data)
+  // The mask is a vector of i1 elements.
+  const SDValue &getBasePtr() const { return getOperand(1); }
+  const SDValue &getMask() const    { return getOperand(2); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MLOAD ||
+           N->getOpcode() == ISD::MSTORE;
+  }
+};
+
+/// This class is used to represent an MLOAD node
+class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                   ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT,
+                   MachineMemOperand *MMO)
+      : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, MemVT, MMO) {
+    LoadSDNodeBits.ExtTy = ETy;
+    LoadSDNodeBits.IsExpanding = IsExpanding;
+  }
+
+  ISD::LoadExtType getExtensionType() const {
+    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
+  }
+
+  const SDValue &getSrc0() const { return getOperand(3); }
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MLOAD;
+  }
+
+  bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
+};
+
+/// This class is used to represent an MSTORE node
+class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                    bool isTrunc, bool isCompressing, EVT MemVT,
+                    MachineMemOperand *MMO)
+      : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
+    StoreSDNodeBits.IsTruncating = isTrunc;
+    StoreSDNodeBits.IsCompressing = isCompressing;
+  }
+
+  /// Return true if the op does a truncation before store.
+  /// For integers this is the same as doing a TRUNCATE and storing the result.
+  /// For floats, it is the same as doing an FP_ROUND and storing the result.
+  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+
+  /// Returns true if the op does a compression to the vector before storing.
+  /// The node contiguously stores only the active elements of the source
+  /// vector (those whose corresponding mask bit is set) to unaligned memory
+  /// at the base address.
+  bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
+
+  const SDValue &getValue() const { return getOperand(3); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MSTORE;
+  }
+};
+
+/// This is a base class used to represent
+/// MGATHER and MSCATTER nodes
+///
+class MaskedGatherScatterSDNode : public MemSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
+                            const DebugLoc &dl, SDVTList VTs, EVT MemVT,
+                            MachineMemOperand *MMO)
+      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
+
+  // In both nodes the mask is Op2 and the base address is Op3:
+  // MaskedGatherSDNode  (Chain, passthru, mask, base, index, scale)
+  // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
+  // The mask is a vector of i1 elements.
+  const SDValue &getBasePtr() const { return getOperand(3); }
+  const SDValue &getIndex()   const { return getOperand(4); }
+  const SDValue &getMask()    const { return getOperand(2); }
+  const SDValue &getValue()   const { return getOperand(1); }
+  const SDValue &getScale()   const { return getOperand(5); }
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MGATHER ||
+           N->getOpcode() == ISD::MSCATTER;
+  }
+};
+
+/// This class is used to represent an MGATHER node
+///
+class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                     EVT MemVT, MachineMemOperand *MMO)
+      : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO) {}
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MGATHER;
+  }
+};
+
+/// This class is used to represent an MSCATTER node
+///
+class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
+public:
+  friend class SelectionDAG;
+
+  MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
+                      EVT MemVT, MachineMemOperand *MMO)
+      : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO) {}
+
+  static bool classof(const SDNode *N) {
+    return N->getOpcode() == ISD::MSCATTER;
+  }
+};
+
+/// An SDNode that represents everything that will be needed
+/// to construct a MachineInstr. These nodes are created during the
+/// instruction selection proper phase.
+class MachineSDNode : public SDNode {
+public:
+  using mmo_iterator = MachineMemOperand **;
+
+private:
+  friend class SelectionDAG;
+
+  MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
+      : SDNode(Opc, Order, DL, VTs) {}
+
+  /// Memory reference descriptions for this instruction.
+  mmo_iterator MemRefs = nullptr;
+  mmo_iterator MemRefsEnd = nullptr;
+
+public:
+  mmo_iterator memoperands_begin() const { return MemRefs; }
+  mmo_iterator memoperands_end() const { return MemRefsEnd; }
+  bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+
+  /// Assign this MachineSDNode's memory reference descriptor
+  /// list. This does not transfer ownership.
+  void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+    for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
+      assert(*MMI && "Null mem ref detected!");
+    MemRefs = NewMemRefs;
+    MemRefsEnd = NewMemRefsEnd;
+  }
+
+  static bool classof(const SDNode *N) {
+    return N->isMachineOpcode();
+  }
+};
+
+class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
+                                            SDNode, ptrdiff_t> {
+  const SDNode *Node;
+  unsigned Operand;
+
+  SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+
+public:
+  bool operator==(const SDNodeIterator& x) const {
+    return Operand == x.Operand;
+  }
+  bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
+
+  pointer operator*() const {
+    return Node->getOperand(Operand).getNode();
+  }
+  pointer operator->() const { return operator*(); }
+
+  SDNodeIterator& operator++() {                // Preincrement
+    ++Operand;
+    return *this;
+  }
+  SDNodeIterator operator++(int) { // Postincrement
+    SDNodeIterator tmp = *this; ++*this; return tmp;
+  }
+  size_t operator-(SDNodeIterator Other) const {
+    assert(Node == Other.Node &&
+           "Cannot compare iterators of two different nodes!");
+    return Operand - Other.Operand;
+  }
+
+  static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
+  static SDNodeIterator end  (const SDNode *N) {
+    return SDNodeIterator(N, N->getNumOperands());
+  }
+
+  unsigned getOperand() const { return Operand; }
+  const SDNode *getNode() const { return Node; }
+};
+
+template <> struct GraphTraits<SDNode*> {
+  using NodeRef = SDNode *;
+  using ChildIteratorType = SDNodeIterator;
+
+  static NodeRef getEntryNode(SDNode *N) { return N; }
+
+  static ChildIteratorType child_begin(NodeRef N) {
+    return SDNodeIterator::begin(N);
+  }
+
+  static ChildIteratorType child_end(NodeRef N) {
+    return SDNodeIterator::end(N);
+  }
+};
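+
+// Illustrative sketch: this adapter lets generic graph code visit a node's
+// operand nodes as children (N is assumed to be an SDNode*):
+//
+//   for (SDNodeIterator I = SDNodeIterator::begin(N),
+//                       E = SDNodeIterator::end(N); I != E; ++I)
+//     ...; // *I is the operand's SDNode*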
+
+/// A representation of the largest SDNode, for use in sizeof().
+///
+/// This needs to be a union because the largest node differs between 32-bit
+/// systems with 4-byte pointer alignment and those with 8-byte alignment.
+using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
+                                            BlockAddressSDNode,
+                                            GlobalAddressSDNode>;
+
+/// The SDNode class with the greatest alignment requirement.
+using MostAlignedSDNode = GlobalAddressSDNode;
+
+namespace ISD {
+
+  /// Returns true if the specified node is a non-extending and unindexed load.
+  inline bool isNormalLoad(const SDNode *N) {
+    const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
+    return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
+      Ld->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-extending load.
+  inline bool isNON_EXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+  }
+
+  /// Returns true if the specified node is a EXTLOAD.
+  inline bool isEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+  }
+
+  /// Returns true if the specified node is a SEXTLOAD.
+  inline bool isSEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+  }
+
+  /// Returns true if the specified node is a ZEXTLOAD.
+  inline bool isZEXTLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+  }
+
+  /// Returns true if the specified node is an unindexed load.
+  inline bool isUNINDEXEDLoad(const SDNode *N) {
+    return isa<LoadSDNode>(N) &&
+      cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-truncating
+  /// and unindexed store.
+  inline bool isNormalStore(const SDNode *N) {
+    const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
+    return St && !St->isTruncatingStore() &&
+      St->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Returns true if the specified node is a non-truncating store.
+  inline bool isNON_TRUNCStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
+  }
+
+  /// Returns true if the specified node is a truncating store.
+  inline bool isTRUNCStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
+  }
+
+  /// Returns true if the specified node is an unindexed store.
+  inline bool isUNINDEXEDStore(const SDNode *N) {
+    return isa<StoreSDNode>(N) &&
+      cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+  }
+
+  /// Attempt to match a unary predicate against a scalar/splat constant or
+  /// every element of a constant BUILD_VECTOR.
+  bool matchUnaryPredicate(SDValue Op,
+                           std::function<bool(ConstantSDNode *)> Match);
+
+  /// Attempt to match a binary predicate against a pair of scalar/splat
+  /// constants or every element of a pair of constant BUILD_VECTORs.
+  bool matchBinaryPredicate(
+      SDValue LHS, SDValue RHS,
+      std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match);
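+
+  // An illustrative use from caller code (Op is assumed to be an SDValue):
+  //
+  //   bool AllNonZero = ISD::matchUnaryPredicate(
+  //       Op, [](ConstantSDNode *C) { return !C->isNullValue(); });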
+
+} // end namespace ISD
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h
new file mode 100644
index 0000000..45c1df4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SelectionDAGTargetInfo.h
@@ -0,0 +1,160 @@
+//==- llvm/CodeGen/SelectionDAGTargetInfo.h - SelectionDAG Info --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAGTargetInfo class, which targets can
+// subclass to parameterize the SelectionDAG lowering and instruction
+// selection process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
+#define LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
+
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Support/CodeGen.h"
+#include <utility>
+
+namespace llvm {
+
+class SelectionDAG;
+
+//===----------------------------------------------------------------------===//
+/// Targets can subclass this to parameterize the
+/// SelectionDAG lowering and instruction selection process.
+///
+class SelectionDAGTargetInfo {
+public:
+  explicit SelectionDAGTargetInfo() = default;
+  SelectionDAGTargetInfo(const SelectionDAGTargetInfo &) = delete;
+  SelectionDAGTargetInfo &operator=(const SelectionDAGTargetInfo &) = delete;
+  virtual ~SelectionDAGTargetInfo();
+
+  /// Emit target-specific code that performs a memcpy.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple loads/stores and can be
+  /// more efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  ///
+  /// If AlwaysInline is true, the size is constant and the target should not
+  /// emit any calls and is strongly encouraged to attempt to emit inline code
+  /// even if it is beyond the usual threshold because this intrinsic is being
+  /// expanded in a place where calls are not feasible (e.g. within the prologue
+  /// for another call). If the target chooses to decline an AlwaysInline
+  /// request here, legalize will resort to using simple loads and stores.
+  virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+                                          SDValue Chain, SDValue Op1,
+                                          SDValue Op2, SDValue Op3,
+                                          unsigned Align, bool isVolatile,
+                                          bool AlwaysInline,
+                                          MachinePointerInfo DstPtrInfo,
+                                          MachinePointerInfo SrcPtrInfo) const {
+    return SDValue();
+  }
+
+  /// Emit target-specific code that performs a memmove.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple loads/stores and can be
+  /// more efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  virtual SDValue EmitTargetCodeForMemmove(
+      SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1,
+      SDValue Op2, SDValue Op3, unsigned Align, bool isVolatile,
+      MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
+    return SDValue();
+  }
+
+  /// Emit target-specific code that performs a memset.
+  /// This can be used by targets to provide code sequences for cases
+  /// that don't fit the target's parameters for simple stores and can be more
+  /// efficient than using a library call. This function can return a null
+  /// SDValue if the target declines to use custom code and a different
+  /// lowering strategy should be used.
+  virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+                                          SDValue Chain, SDValue Op1,
+                                          SDValue Op2, SDValue Op3,
+                                          unsigned Align, bool isVolatile,
+                                          MachinePointerInfo DstPtrInfo) const {
+    return SDValue();
+  }
+
+  /// Emit target-specific code that performs a memcmp, in cases where that is
+  /// faster than a libcall. The first returned SDValue is the result of the
+  /// memcmp and the second is the chain. Both SDValues can be null if a normal
+  /// libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Op1, SDValue Op2, SDValue Op3,
+                          MachinePointerInfo Op1PtrInfo,
+                          MachinePointerInfo Op2PtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a memchr, in cases where that is
+  /// faster than a libcall. The first returned SDValue is the result of the
+  /// memchr and the second is the chain. Both SDValues can be null if a normal
+  /// libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Src, SDValue Char, SDValue Length,
+                          MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a strcpy or stpcpy, in cases
+  /// where that is faster than a libcall.
+  /// The first returned SDValue is the result of the copy (the start
+  /// of the destination string for strcpy, a pointer to the null terminator
+  /// for stpcpy) and the second is the chain.  Both SDValues can be null
+  /// if a normal libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                          SDValue Dest, SDValue Src,
+                          MachinePointerInfo DestPtrInfo,
+                          MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  /// Emit target-specific code that performs a strcmp, in cases where that is
+  /// faster than a libcall.
+  /// The first returned SDValue is the result of the strcmp and the second is
+  /// the chain. Both SDValues can be null if a normal libcall should be used.
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+                          SDValue Op1, SDValue Op2,
+                          MachinePointerInfo Op1PtrInfo,
+                          MachinePointerInfo Op2PtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                          SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  virtual std::pair<SDValue, SDValue>
+  EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
+                           SDValue Src, SDValue MaxLength,
+                           MachinePointerInfo SrcPtrInfo) const {
+    return std::make_pair(SDValue(), SDValue());
+  }
+
+  // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather
+  // than FMUL and ADD is delegated to the machine combiner.
+  virtual bool generateFMAsInMachineCombiner(CodeGenOpt::Level OptLevel) const {
+    return false;
+  }
+};
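+
+// An illustrative sketch of a target hook (the subclass name is hypothetical);
+// returning a null SDValue declines, exactly as the defaults above do:
+//
+//   class MyTargetSelectionDAGInfo : public SelectionDAGTargetInfo {
+//   public:
+//     SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+//                                     SDValue Chain, SDValue Op1, SDValue Op2,
+//                                     SDValue Op3, unsigned Align,
+//                                     bool isVolatile,
+//                                     MachinePointerInfo DstPtrInfo)
+//         const override {
+//       return SDValue(); // defer to the generic expansion
+//     }
+//   };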
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
new file mode 100644
index 0000000..3a91e36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/SlotIndexes.h
@@ -0,0 +1,714 @@
+//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements SlotIndex and related classes. The purpose of SlotIndex
+// is to describe a position at which a register can become live, or cease to
+// be live.
+//
+// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
+// is held by LiveIntervals and provides the real numbering. This allows
+// LiveIntervals to perform largely transparent renumbering.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SLOTINDEXES_H
+#define LLVM_CODEGEN_SLOTINDEXES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+  /// This class represents an entry in the slot index list held in the
+  /// SlotIndexes pass. It should not be used directly. See the
+  /// SlotIndex & SlotIndexes classes for the public interface to this
+  /// information.
+  class IndexListEntry : public ilist_node<IndexListEntry> {
+    MachineInstr *mi;
+    unsigned index;
+
+  public:
+    IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}
+
+    MachineInstr* getInstr() const { return mi; }
+    void setInstr(MachineInstr *mi) {
+      this->mi = mi;
+    }
+
+    unsigned getIndex() const { return index; }
+    void setIndex(unsigned index) {
+      this->index = index;
+    }
+
+#ifdef EXPENSIVE_CHECKS
+    // When EXPENSIVE_CHECKS is defined, "erased" index list entries will
+    // actually be moved to a "graveyard" list, and have their pointers
+    // poisoned, so that dangling SlotIndex access can be reliably detected.
+    void setPoison() {
+      intptr_t tmp = reinterpret_cast<intptr_t>(mi);
+      assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
+      tmp |= 0x1;
+      mi = reinterpret_cast<MachineInstr*>(tmp);
+    }
+
+    bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
+#endif // EXPENSIVE_CHECKS
+  };
+
+  template <>
+  struct ilist_alloc_traits<IndexListEntry>
+      : public ilist_noalloc_traits<IndexListEntry> {};
+
+  /// SlotIndex - An opaque wrapper around machine indexes.
+  class SlotIndex {
+    friend class SlotIndexes;
+
+    enum Slot {
+      /// Basic block boundary.  Used for live ranges entering and leaving a
+      /// block without being live in the layout neighbor.  Also used as the
+      /// def slot of PHI-defs.
+      Slot_Block,
+
+      /// Early-clobber register use/def slot.  A live range defined at
+      /// Slot_EarlyClobber interferes with normal live ranges killed at
+      /// Slot_Register.  Also used as the kill slot for live ranges tied to an
+      /// early-clobber def.
+      Slot_EarlyClobber,
+
+      /// Normal register use/def slot.  Normal instructions kill and define
+      /// register live ranges at this slot.
+      Slot_Register,
+
+      /// Dead def kill point.  Kill slot for a live range that is defined by
+      /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
+      /// used anywhere.
+      Slot_Dead,
+
+      Slot_Count
+    };
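+
+    // Because getIndex() ORs the slot into the low bits of the list entry's
+    // index, the four slots of one instruction compare in the order
+    // Slot_Block < Slot_EarlyClobber < Slot_Register < Slot_Dead.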
+
+    PointerIntPair<IndexListEntry*, 2, unsigned> lie;
+
+    SlotIndex(IndexListEntry *entry, unsigned slot)
+      : lie(entry, slot) {}
+
+    IndexListEntry* listEntry() const {
+      assert(isValid() && "Attempt to compare reserved index.");
+#ifdef EXPENSIVE_CHECKS
+      assert(!lie.getPointer()->isPoisoned() &&
+             "Attempt to access deleted list-entry.");
+#endif // EXPENSIVE_CHECKS
+      return lie.getPointer();
+    }
+
+    unsigned getIndex() const {
+      return listEntry()->getIndex() | getSlot();
+    }
+
+    /// Returns the slot for this SlotIndex.
+    Slot getSlot() const {
+      return static_cast<Slot>(lie.getInt());
+    }
+
+  public:
+    enum {
+      /// The default distance between instructions as returned by distance().
+      /// This may vary as instructions are inserted and removed.
+      InstrDist = 4 * Slot_Count
+    };
+
+    /// Construct an invalid index.
+    SlotIndex() = default;
+
+    // Construct a new slot index from the given one, and set the slot.
+    SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
+      assert(lie.getPointer() != nullptr &&
+             "Attempt to construct index with 0 pointer.");
+    }
+
+    /// Returns true if this is a valid index. Invalid indices do
+    /// not point into an index table, and cannot be compared.
+    bool isValid() const {
+      return lie.getPointer();
+    }
+
+    /// Return true for a valid index.
+    explicit operator bool() const { return isValid(); }
+
+    /// Print this index to the given raw_ostream.
+    void print(raw_ostream &os) const;
+
+    /// Dump this index to stderr.
+    void dump() const;
+
+    /// Compare two SlotIndex objects for equality.
+    bool operator==(SlotIndex other) const {
+      return lie == other.lie;
+    }
+    /// Compare two SlotIndex objects for inequality.
+    bool operator!=(SlotIndex other) const {
+      return lie != other.lie;
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is strictly lower than the second.
+    bool operator<(SlotIndex other) const {
+      return getIndex() < other.getIndex();
+    }
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is lower than, or equal to, the second.
+    bool operator<=(SlotIndex other) const {
+      return getIndex() <= other.getIndex();
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is greater than the second.
+    bool operator>(SlotIndex other) const {
+      return getIndex() > other.getIndex();
+    }
+
+    /// Compare two SlotIndex objects. Return true if the first index
+    /// is greater than, or equal to, the second.
+    bool operator>=(SlotIndex other) const {
+      return getIndex() >= other.getIndex();
+    }
+
+    /// isSameInstr - Return true if A and B refer to the same instruction.
+    static bool isSameInstr(SlotIndex A, SlotIndex B) {
+      return A.lie.getPointer() == B.lie.getPointer();
+    }
+
+    /// isEarlierInstr - Return true if A refers to an instruction earlier than
+    /// B. This is equivalent to A < B && !isSameInstr(A, B).
+    static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
+      return A.listEntry()->getIndex() < B.listEntry()->getIndex();
+    }
+
+    /// Return true if A refers to the same instruction as B or an earlier one.
+    /// This is equivalent to !isEarlierInstr(B, A).
+    static bool isEarlierEqualInstr(SlotIndex A, SlotIndex B) {
+      return !isEarlierInstr(B, A);
+    }
+
+    /// Return the distance from this index to the given one.
+    int distance(SlotIndex other) const {
+      return other.getIndex() - getIndex();
+    }
+
+    /// Return the scaled distance from this index to the given one, where all
+    /// slots on the same instruction have zero distance.
+    int getInstrDistance(SlotIndex other) const {
+      return (other.listEntry()->getIndex() - listEntry()->getIndex())
+        / Slot_Count;
+    }
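+
+    // For example, with the default spacing of InstrDist (4 * Slot_Count = 16)
+    // raw index units between instructions, distance() between matching slots
+    // of adjacent instructions returns 16, while getInstrDistance() returns 4.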
+
+    /// isBlock - Returns true if this is a block boundary slot.
+    bool isBlock() const { return getSlot() == Slot_Block; }
+
+    /// isEarlyClobber - Returns true if this is an early-clobber slot.
+    bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
+
+    /// isRegister - Returns true if this is a normal register use/def slot.
+    /// Note that early-clobber slots may also be used for uses and defs.
+    bool isRegister() const { return getSlot() == Slot_Register; }
+
+    /// isDead - Returns true if this is a dead def kill slot.
+    bool isDead() const { return getSlot() == Slot_Dead; }
+
+    /// Returns the base index associated with this index. The base index
+    /// is the one associated with the Slot_Block slot for the instruction
+    /// pointed to by this index.
+    SlotIndex getBaseIndex() const {
+      return SlotIndex(listEntry(), Slot_Block);
+    }
+
+    /// Returns the boundary index associated with this index. The boundary
+    /// index is the one associated with the Slot_Dead slot for the instruction
+    /// pointed to by this index.
+    SlotIndex getBoundaryIndex() const {
+      return SlotIndex(listEntry(), Slot_Dead);
+    }
+
+    /// Returns the register use/def slot in the current instruction for a
+    /// normal or early-clobber def.
+    SlotIndex getRegSlot(bool EC = false) const {
+      return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
+    }
+
+    /// Returns the dead def kill slot for the current instruction.
+    SlotIndex getDeadSlot() const {
+      return SlotIndex(listEntry(), Slot_Dead);
+    }
+
+    /// Returns the next slot in the index list. This could be either the
+    /// next slot for the instruction pointed to by this index or, if this
+    /// index is a Slot_Dead slot, the first slot for the next instruction.
+    /// WARNING: This method is considerably more expensive than the methods
+    /// that return specific slots (getRegSlot(), getDeadSlot(), etc.). If
+    /// you can - please use one of those methods.
+    SlotIndex getNextSlot() const {
+      Slot s = getSlot();
+      if (s == Slot_Dead) {
+        return SlotIndex(&*++listEntry()->getIterator(), Slot_Block);
+      }
+      return SlotIndex(listEntry(), s + 1);
+    }
+
+    /// Returns the next index. This is the index corresponding to the this
+    /// index's slot, but for the next instruction.
+    SlotIndex getNextIndex() const {
+      return SlotIndex(&*++listEntry()->getIterator(), getSlot());
+    }
+
+    /// Returns the previous slot in the index list. This could be either the
+    /// previous slot for the instruction pointed to by this index or, if this
+    /// index is a Slot_Block, the last slot for the previous instruction.
+    /// WARNING: This method is considerably more expensive than the methods
+    /// that return specific slots (getRegSlot(), getDeadSlot(), etc.). If
+    /// you can - please use one of those methods.
+    SlotIndex getPrevSlot() const {
+      Slot s = getSlot();
+      if (s == Slot_Block) {
+        return SlotIndex(&*--listEntry()->getIterator(), Slot_Dead);
+      }
+      return SlotIndex(listEntry(), s - 1);
+    }
+
+    /// Returns the previous index. This is the index corresponding to this
+    /// index's slot, but for the previous instruction.
+    SlotIndex getPrevIndex() const {
+      return SlotIndex(&*--listEntry()->getIterator(), getSlot());
+    }
+  };
+
+  template <> struct isPodLike<SlotIndex> { static const bool value = true; };
+
+  inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
+    li.print(os);
+    return os;
+  }
+
+  using IdxMBBPair = std::pair<SlotIndex, MachineBasicBlock *>;
+
+  inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
+    return V < IM.first;
+  }
+
+  inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
+    return IM.first < V;
+  }
+
+  struct Idx2MBBCompare {
+    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
+      return LHS.first < RHS.first;
+    }
+  };
+
+  /// SlotIndexes pass.
+  ///
+  /// This pass assigns indexes to each instruction.
+  class SlotIndexes : public MachineFunctionPass {
+  private:
+    // IndexListEntry allocator.
+    BumpPtrAllocator ileAllocator;
+
+    using IndexList = ilist<IndexListEntry>;
+    IndexList indexList;
+
+#ifdef EXPENSIVE_CHECKS
+    IndexList graveyardList;
+#endif // EXPENSIVE_CHECKS
+
+    MachineFunction *mf;
+
+    using Mi2IndexMap = DenseMap<const MachineInstr *, SlotIndex>;
+    Mi2IndexMap mi2iMap;
+
+    /// MBBRanges - Map MBB number to (start, stop) indexes.
+    SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;
+
+    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
+    /// and MBB id.
+    SmallVector<IdxMBBPair, 8> idx2MBBMap;
+
+    IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
+      IndexListEntry *entry =
+          static_cast<IndexListEntry *>(ileAllocator.Allocate(
+              sizeof(IndexListEntry), alignof(IndexListEntry)));
+
+      new (entry) IndexListEntry(mi, index);
+
+      return entry;
+    }
+
+    /// Renumber locally after inserting curItr.
+    void renumberIndexes(IndexList::iterator curItr);
+
+  public:
+    static char ID;
+
+    SlotIndexes() : MachineFunctionPass(ID) {
+      initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+    }
+
+    ~SlotIndexes() override {
+      // The indexList's nodes are all allocated in the BumpPtrAllocator.
+      indexList.clearAndLeakNodesUnsafely();
+    }
+
+    void getAnalysisUsage(AnalysisUsage &au) const override;
+    void releaseMemory() override;
+
+    bool runOnMachineFunction(MachineFunction &fn) override;
+
+    /// Dump the indexes.
+    void dump() const;
+
+    /// Renumber the index list, providing space for new instructions.
+    void renumberIndexes();
+
+    /// Repair indexes after adding and removing instructions.
+    void repairIndexesInRange(MachineBasicBlock *MBB,
+                              MachineBasicBlock::iterator Begin,
+                              MachineBasicBlock::iterator End);
+
+    /// Returns the zero index for this analysis.
+    SlotIndex getZeroIndex() {
+      assert(indexList.front().getIndex() == 0 && "First index is not 0?");
+      return SlotIndex(&indexList.front(), 0);
+    }
+
+    /// Returns the base index of the last slot in this analysis.
+    SlotIndex getLastIndex() {
+      return SlotIndex(&indexList.back(), 0);
+    }
+
+    /// Returns true if the given machine instr is mapped to an index,
+    /// otherwise returns false.
+    bool hasIndex(const MachineInstr &instr) const {
+      return mi2iMap.count(&instr);
+    }
+
+    /// Returns the base index for the given instruction.
+    SlotIndex getInstructionIndex(const MachineInstr &MI) const {
+      // Instructions inside a bundle have the same number as the bundle itself.
+      const MachineInstr &BundleStart = *getBundleStart(MI.getIterator());
+      Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleStart);
+      assert(itr != mi2iMap.end() && "Instruction not found in maps.");
+      return itr->second;
+    }
+
+    /// Returns the instruction for the given index, or null if the given
+    /// index has no instruction associated with it.
+    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+      return index.isValid() ? index.listEntry()->getInstr() : nullptr;
+    }
+
+    /// Returns the next non-null index, if one exists.
+    /// Otherwise returns getLastIndex().
+    SlotIndex getNextNonNullIndex(SlotIndex Index) {
+      IndexList::iterator I = Index.listEntry()->getIterator();
+      IndexList::iterator E = indexList.end();
+      while (++I != E)
+        if (I->getInstr())
+          return SlotIndex(&*I, Index.getSlot());
+      // We reached the end of the function.
+      return getLastIndex();
+    }
+
+    /// getIndexBefore - Returns the index of the last indexed instruction
+    /// before MI, or the start index of its basic block.
+    /// MI is not required to have an index.
+    SlotIndex getIndexBefore(const MachineInstr &MI) const {
+      const MachineBasicBlock *MBB = MI.getParent();
+      assert(MBB && "MI must be inserted inna basic block");
+      MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
+      while (true) {
+        if (I == B)
+          return getMBBStartIdx(MBB);
+        --I;
+        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
+        if (MapItr != mi2iMap.end())
+          return MapItr->second;
+      }
+    }
+
+    /// getIndexAfter - Returns the index of the first indexed instruction
+    /// after MI, or the end index of its basic block.
+    /// MI is not required to have an index.
+    SlotIndex getIndexAfter(const MachineInstr &MI) const {
+      const MachineBasicBlock *MBB = MI.getParent();
+      assert(MBB && "MI must be inserted inna basic block");
+      MachineBasicBlock::const_iterator I = MI, E = MBB->end();
+      while (true) {
+        ++I;
+        if (I == E)
+          return getMBBEndIdx(MBB);
+        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
+        if (MapItr != mi2iMap.end())
+          return MapItr->second;
+      }
+    }
+
+    /// Return the (start,end) range of the given basic block number.
+    const std::pair<SlotIndex, SlotIndex> &
+    getMBBRange(unsigned Num) const {
+      return MBBRanges[Num];
+    }
+
+    /// Return the (start,end) range of the given basic block.
+    const std::pair<SlotIndex, SlotIndex> &
+    getMBBRange(const MachineBasicBlock *MBB) const {
+      return getMBBRange(MBB->getNumber());
+    }
+
+    /// Returns the first index in the given basic block number.
+    SlotIndex getMBBStartIdx(unsigned Num) const {
+      return getMBBRange(Num).first;
+    }
+
+    /// Returns the first index in the given basic block.
+    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+      return getMBBRange(mbb).first;
+    }
+
+    /// Returns the last index in the given basic block number.
+    SlotIndex getMBBEndIdx(unsigned Num) const {
+      return getMBBRange(Num).second;
+    }
+
+    /// Returns the last index in the given basic block.
+    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+      return getMBBRange(mbb).second;
+    }
+
+    /// Iterator over the idx2MBBMap (sorted pairs of slot index of basic block
+    /// begin and basic block)
+    using MBBIndexIterator = SmallVectorImpl<IdxMBBPair>::const_iterator;
+
+    /// Move iterator to the next IdxMBBPair where the SlotIndex is greater
+    /// than or equal to \p To.
+    MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const {
+      return std::lower_bound(I, idx2MBBMap.end(), To);
+    }
+
+    /// Get an iterator pointing to the IdxMBBPair with the smallest SlotIndex
+    /// that is greater than or equal to \p Idx.
+    MBBIndexIterator findMBBIndex(SlotIndex Idx) const {
+      return advanceMBBIndex(idx2MBBMap.begin(), Idx);
+    }
+
+    /// Returns an iterator for the begin of the idx2MBBMap.
+    MBBIndexIterator MBBIndexBegin() const {
+      return idx2MBBMap.begin();
+    }
+
+    /// Return an iterator for the end of the idx2MBBMap.
+    MBBIndexIterator MBBIndexEnd() const {
+      return idx2MBBMap.end();
+    }
+
+    /// Returns the basic block which the given index falls in.
+    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+      if (MachineInstr *MI = getInstructionFromIndex(index))
+        return MI->getParent();
+
+      MBBIndexIterator I = findMBBIndex(index);
+      // Take the pair containing the index
+      MBBIndexIterator J =
+        ((I != MBBIndexEnd() && I->first > index) ||
+         (I == MBBIndexEnd() && !idx2MBBMap.empty())) ? std::prev(I) : I;
+
+      assert(J != MBBIndexEnd() && J->first <= index &&
+             index < getMBBEndIdx(J->second) &&
+             "index does not correspond to an MBB");
+      return J->second;
+    }
+
+    /// Returns the MBB covering the given range, or null if the range covers
+    /// more than one basic block.
+    MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
+
+      assert(start < end && "Backwards ranges not allowed.");
+      MBBIndexIterator itr = findMBBIndex(start);
+      if (itr == MBBIndexEnd()) {
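+        // start is past the last block's start index, so the only candidate
+        // is the last block.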
+        itr = std::prev(itr);
+        return itr->second;
+      }
+
+      // Check that we don't cross the boundary into this block.
+      if (itr->first < end)
+        return nullptr;
+
+      itr = std::prev(itr);
+
+      if (itr->first <= start)
+        return itr->second;
+
+      return nullptr;
+    }
+
+    /// Insert the given machine instruction into the mapping. Returns the
+    /// assigned index.
+    /// If Late is set and there are null indexes between MI's neighboring
+    /// instructions, create the new index after the null indexes instead of
+    /// before them.
+    SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late = false) {
+      assert(!MI.isInsideBundle() &&
+             "Instructions inside bundles should use bundle start's slot.");
+      assert(mi2iMap.find(&MI) == mi2iMap.end() && "Instr already indexed.");
+      // Numbering DBG_VALUE instructions could cause code generation to be
+      // affected by debug information.
+      assert(!MI.isDebugValue() && "Cannot number DBG_VALUE instructions.");
+
+      assert(MI.getParent() != nullptr && "Instr must be added to function.");
+
+      // Get the entries where MI should be inserted.
+      IndexList::iterator prevItr, nextItr;
+      if (Late) {
+        // Insert MI's index immediately before the following instruction.
+        nextItr = getIndexAfter(MI).listEntry()->getIterator();
+        prevItr = std::prev(nextItr);
+      } else {
+        // Insert MI's index immediately after the preceding instruction.
+        prevItr = getIndexBefore(MI).listEntry()->getIterator();
+        nextItr = std::next(prevItr);
+      }
+
+      // Get a number for the new instr, or 0 if there's no room currently.
+      // In the latter case we'll force a renumber later.
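+      // For example, with prev index 16 and next index 32, dist is
+      // ((32 - 16) / 2) & ~3u == 8 (rounded down to a multiple of four so
+      // the slot bits stay clear), and MI is numbered 16 + 8 == 24. A zero
+      // dist means the gap is exhausted and forces a local renumbering.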
+      unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
+      unsigned newNumber = prevItr->getIndex() + dist;
+
+      // Insert a new list entry for MI.
+      IndexList::iterator newItr =
+          indexList.insert(nextItr, createEntry(&MI, newNumber));
+
+      // Renumber locally if we need to.
+      if (dist == 0)
+        renumberIndexes(newItr);
+
+      SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
+      mi2iMap.insert(std::make_pair(&MI, newIndex));
+      return newIndex;
+    }
+
+    /// Removes machine instruction (bundle) \p MI from the mapping.
+    /// This should be called before MachineInstr::eraseFromParent() is used to
+    /// remove a whole bundle or an unbundled instruction.
+    void removeMachineInstrFromMaps(MachineInstr &MI);
+
+    /// Removes a single machine instruction \p MI from the mapping.
+    /// This should be called before MachineInstr::eraseFromBundle() is used to
+    /// remove a single instruction (out of a bundle).
+    void removeSingleMachineInstrFromMaps(MachineInstr &MI);
+
+    /// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
+    /// maps used by register allocator. \returns the index where the new
+    /// instruction was inserted.
+    SlotIndex replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
+      Mi2IndexMap::iterator mi2iItr = mi2iMap.find(&MI);
+      if (mi2iItr == mi2iMap.end())
+        return SlotIndex();
+      SlotIndex replaceBaseIndex = mi2iItr->second;
+      IndexListEntry *miEntry(replaceBaseIndex.listEntry());
+      assert(miEntry->getInstr() == &MI &&
+             "Mismatched instruction in index tables.");
+      miEntry->setInstr(&NewMI);
+      mi2iMap.erase(mi2iItr);
+      mi2iMap.insert(std::make_pair(&NewMI, replaceBaseIndex));
+      return replaceBaseIndex;
+    }
+
+    /// Add the given MachineBasicBlock into the maps.
+    void insertMBBInMaps(MachineBasicBlock *mbb) {
+      MachineFunction::iterator nextMBB =
+        std::next(MachineFunction::iterator(mbb));
+
+      IndexListEntry *startEntry = nullptr;
+      IndexListEntry *endEntry = nullptr;
+      IndexList::iterator newItr;
+      if (nextMBB == mbb->getParent()->end()) {
+        startEntry = &indexList.back();
+        endEntry = createEntry(nullptr, 0);
+        newItr = indexList.insertAfter(startEntry->getIterator(), endEntry);
+      } else {
+        startEntry = createEntry(nullptr, 0);
+        endEntry = getMBBStartIdx(&*nextMBB).listEntry();
+        newItr = indexList.insert(endEntry->getIterator(), startEntry);
+      }
+
+      SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
+      SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
+
+      MachineFunction::iterator prevMBB(mbb);
+      assert(prevMBB != mbb->getParent()->end() &&
+             "Can't insert a new block at the beginning of a function.");
+      --prevMBB;
+      MBBRanges[prevMBB->getNumber()].second = startIdx;
+
+      assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
+             "Blocks must be added in order");
+      MBBRanges.push_back(std::make_pair(startIdx, endIdx));
+      idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+      renumberIndexes(newItr);
+      std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+    }
+
+    /// \brief Free the resources that were required to maintain a SlotIndex.
+    ///
+    /// Once an index is no longer needed (for instance because the instruction
+    /// at that index has been moved), the resources required to maintain the
+    /// index can be relinquished to reduce memory use and improve renumbering
+    /// performance. Any remaining SlotIndex objects that point to the same
+    /// index are left 'dangling' (much the same as a dangling pointer to a
+    /// freed object) and should not be accessed, except to destruct them.
+    ///
+    /// Like dangling pointers, access to dangling SlotIndexes can cause
+    /// painful-to-track-down bugs, especially if the memory for the index
+    /// previously pointed to has been re-used. To detect dangling SlotIndex
+    /// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
+    /// be retained in a graveyard instead of being freed. Operations on indexes
+    /// in the graveyard will trigger an assertion.
+    void eraseIndex(SlotIndex index) {
+      IndexListEntry *entry = index.listEntry();
+#ifdef EXPENSIVE_CHECKS
+      indexList.remove(entry);
+      graveyardList.push_back(entry);
+      entry->setPoison();
+#else
+      indexList.erase(entry);
+#endif
+    }
+  };
+
+  // Specialize IntervalMapInfo for half-open slot index intervals.
+  template <>
+  struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SLOTINDEXES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackMaps.h b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
new file mode 100644
index 0000000..4407114
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/StackMaps.h
@@ -0,0 +1,332 @@
+//===- StackMaps.h - StackMaps ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKMAPS_H
+#define LLVM_CODEGEN_STACKMAPS_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class MCStreamer;
+class raw_ostream;
+class TargetRegisterInfo;
+
+/// \brief MI-level stackmap operands.
+///
+/// MI stackmap operations take the form:
+/// <id>, <numBytes>, live args...
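+///
+/// A minimal usage sketch (assuming MI is known to be a raw STACKMAP):
+/// \code
+///   StackMapOpers Opers(&MI);
+///   uint64_t ID = Opers.getID();            // meta operand <id>
+///   unsigned LiveStart = Opers.getVarIdx(); // first live-state operand
+/// \endcode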
+class StackMapOpers {
+public:
+  /// Enumerate the meta operands.
+  enum { IDPos, NBytesPos };
+
+private:
+  const MachineInstr* MI;
+
+public:
+  explicit StackMapOpers(const MachineInstr *MI);
+
+  /// Return the ID for the given stackmap
+  uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given stackmap should emit.
+  uint32_t getNumPatchBytes() const {
+    return MI->getOperand(NBytesPos).getImm();
+  }
+
+  /// Get the operand index of the variable list of non-argument operands.
+  /// These hold the "live state".
+  unsigned getVarIdx() const {
+    // Skip ID, nShadowBytes.
+    return 2;
+  }
+};
+
+/// \brief MI-level patchpoint operands.
+///
+/// MI patchpoint operations take the form:
+/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+///
+/// IR patchpoint intrinsics do not have the <cc> operand because the calling
+/// convention is part of the subclass data.
+///
+/// SD patchpoint nodes do not have a def operand because it is part of the
+/// SDValue.
+///
+/// Patchpoints following the anyregcc convention are handled specially. For
+/// these, the stack map also records the location of the return value and
+/// arguments.
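+///
+/// A usage sketch (assuming MI is known to be a PATCHPOINT):
+/// \code
+///   PatchPointOpers Opers(&MI);
+///   const MachineOperand &Target = Opers.getCallTarget();
+///   unsigned StackMapStart = Opers.getStackMapStartIdx();
+/// \endcode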
+class PatchPointOpers {
+public:
+  /// Enumerate the meta operands.
+  enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd };
+
+private:
+  const MachineInstr *MI;
+  bool HasDef;
+
+  unsigned getMetaIdx(unsigned Pos = 0) const {
+    assert(Pos < MetaEnd && "Meta operand index out of range.");
+    return (HasDef ? 1 : 0) + Pos;
+  }
+
+  const MachineOperand &getMetaOper(unsigned Pos) const {
+    return MI->getOperand(getMetaIdx(Pos));
+  }
+
+public:
+  explicit PatchPointOpers(const MachineInstr *MI);
+
+  bool isAnyReg() const { return (getCallingConv() == CallingConv::AnyReg); }
+  bool hasDef() const { return HasDef; }
+
+  /// Return the ID for the given patchpoint.
+  uint64_t getID() const { return getMetaOper(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given patchpoint should emit.
+  uint32_t getNumPatchBytes() const {
+    return getMetaOper(NBytesPos).getImm();
+  }
+
+  /// Returns the target of the underlying call.
+  const MachineOperand &getCallTarget() const {
+    return getMetaOper(TargetPos);
+  }
+
+  /// Returns the calling convention
+  CallingConv::ID getCallingConv() const {
+    return getMetaOper(CCPos).getImm();
+  }
+
+  unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }
+
+  /// Return the number of call arguments
+  uint32_t getNumCallArgs() const {
+    return MI->getOperand(getMetaIdx(NArgPos)).getImm();
+  }
+
+  /// Get the operand index of the variable list of non-argument operands.
+  /// These hold the "live state".
+  unsigned getVarIdx() const {
+    return getMetaIdx() + MetaEnd + getNumCallArgs();
+  }
+
+  /// Get the index at which stack map locations will be recorded.
+  /// Arguments are not recorded unless the anyregcc convention is used.
+  unsigned getStackMapStartIdx() const {
+    if (isAnyReg())
+      return getArgIdx();
+    return getVarIdx();
+  }
+
+  /// \brief Get the next scratch register operand index.
+  unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
+};
+
+/// MI-level Statepoint operands
+///
+/// Statepoint operands take the form:
+///   <id>, <num patch bytes>, <num call arguments>, <call target>,
+///   [call arguments...],
+///   <StackMaps::ConstantOp>, <calling convention>,
+///   <StackMaps::ConstantOp>, <statepoint flags>,
+///   <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
+///   <gc base/derived pairs...> <gc allocas...>
+/// Note that the last two sets of arguments are not currently length
+///   prefixed.
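+///
+/// For example, a statepoint with <num call arguments> == 2 has its meta
+/// arguments starting at getVarIdx() == 2 + MetaEnd == 6, so the calling
+/// convention immediate is operand getVarIdx() + CCOffset == 7.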
+class StatepointOpers {
+  // TODO: we should change the STATEPOINT representation so that CC and
+  // Flags are part of the meta operands, with args, deopt operands, and
+  // gc operands all prefixed by their length and a type code. This would be
+  // much more consistent.
+public:
+  // These values are absolute offsets into the operands of the statepoint
+  // instruction.
+  enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };
+
+  // These values are relative offsets from the start of the statepoint meta
+  // arguments (i.e. the end of the call arguments).
+  enum { CCOffset = 1, FlagsOffset = 3, NumDeoptOperandsOffset = 5 };
+
+  explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {}
+
+  /// Get the starting index of the non-call-related arguments
+  /// (calling convention, statepoint flags, vm state and gc state).
+  unsigned getVarIdx() const {
+    return MI->getOperand(NCallArgsPos).getImm() + MetaEnd;
+  }
+
+  /// Return the ID for the given statepoint.
+  uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+  /// Return the number of patchable bytes the given statepoint should emit.
+  uint32_t getNumPatchBytes() const {
+    return MI->getOperand(NBytesPos).getImm();
+  }
+
+  /// Returns the target of the underlying call.
+  const MachineOperand &getCallTarget() const {
+    return MI->getOperand(CallTargetPos);
+  }
+
+private:
+  const MachineInstr *MI;
+};
+
+class StackMaps {
+public:
+  struct Location {
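+    // Location kinds mirror the stackmap record format: Register is a value
+    // live in Reg; Direct is the frame address Reg + Offset; Indirect is a
+    // value spilled to memory at [Reg + Offset]; Constant carries the value
+    // itself in Offset; ConstantIndex refers to the stackmap constant pool.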
+    enum LocationType {
+      Unprocessed,
+      Register,
+      Direct,
+      Indirect,
+      Constant,
+      ConstantIndex
+    };
+    LocationType Type = Unprocessed;
+    unsigned Size = 0;
+    unsigned Reg = 0;
+    int64_t Offset = 0;
+
+    Location() = default;
+    Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset)
+        : Type(Type), Size(Size), Reg(Reg), Offset(Offset) {}
+  };
+
+  struct LiveOutReg {
+    unsigned short Reg = 0;
+    unsigned short DwarfRegNum = 0;
+    unsigned short Size = 0;
+
+    LiveOutReg() = default;
+    LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum,
+               unsigned short Size)
+        : Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {}
+  };
+
+  // OpTypes are used to encode information about the following logical
+  // operand (which may consist of several MachineOperands) for the
+  // OpParser.
+  using OpType = enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp };
+
+  StackMaps(AsmPrinter &AP);
+
+  void reset() {
+    CSInfos.clear();
+    ConstPool.clear();
+    FnInfos.clear();
+  }
+
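+  // Typical flow (a sketch): MC lowering calls recordStackMap(),
+  // recordPatchPoint(), or recordStatepoint() as each intrinsic is lowered;
+  // serializeToStackMapSection() is then invoked once, after all functions
+  // have been emitted.
+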
+  /// \brief Generate a stackmap record for a stackmap instruction.
+  ///
+  /// MI must be a raw STACKMAP, not a PATCHPOINT.
+  void recordStackMap(const MachineInstr &MI);
+
+  /// \brief Generate a stackmap record for a patchpoint instruction.
+  void recordPatchPoint(const MachineInstr &MI);
+
+  /// \brief Generate a stackmap record for a statepoint instruction.
+  void recordStatepoint(const MachineInstr &MI);
+
+  /// If there is any stack map data, create a stack map section and serialize
+  /// the map info into it. This clears the stack map data structures
+  /// afterwards.
+  void serializeToStackMapSection();
+
+private:
+  static const char *WSMP;
+
+  using LocationVec = SmallVector<Location, 8>;
+  using LiveOutVec = SmallVector<LiveOutReg, 8>;
+  using ConstantPool = MapVector<uint64_t, uint64_t>;
+
+  struct FunctionInfo {
+    uint64_t StackSize = 0;
+    uint64_t RecordCount = 1;
+
+    FunctionInfo() = default;
+    explicit FunctionInfo(uint64_t StackSize) : StackSize(StackSize) {}
+  };
+
+  struct CallsiteInfo {
+    const MCExpr *CSOffsetExpr = nullptr;
+    uint64_t ID = 0;
+    LocationVec Locations;
+    LiveOutVec LiveOuts;
+
+    CallsiteInfo() = default;
+    CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
+                 LocationVec &&Locations, LiveOutVec &&LiveOuts)
+        : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)),
+          LiveOuts(std::move(LiveOuts)) {}
+  };
+
+  using FnInfoMap = MapVector<const MCSymbol *, FunctionInfo>;
+  using CallsiteInfoList = std::vector<CallsiteInfo>;
+
+  AsmPrinter &AP;
+  CallsiteInfoList CSInfos;
+  ConstantPool ConstPool;
+  FnInfoMap FnInfos;
+
+  MachineInstr::const_mop_iterator
+  parseOperand(MachineInstr::const_mop_iterator MOI,
+               MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
+               LiveOutVec &LiveOuts) const;
+
+  /// \brief Create a live-out register record for the given register @p Reg.
+  LiveOutReg createLiveOutReg(unsigned Reg,
+                              const TargetRegisterInfo *TRI) const;
+
+  /// \brief Parse the register live-out mask and return a vector of live-out
+  /// registers that need to be recorded in the stackmap.
+  LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;
+
+  /// This should be called by the MC lowering code _immediately_ before
+  /// lowering the MI to an MCInst. It records where the operands for the
+  /// instruction are stored, and outputs a label to record the offset of
+  /// the call from the start of the text section. In special cases (e.g. AnyReg
+  /// calling convention) the return register is also recorded if requested.
+  void recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
+                           MachineInstr::const_mop_iterator MOI,
+                           MachineInstr::const_mop_iterator MOE,
+                           bool recordResult = false);
+
+  /// \brief Emit the stackmap header.
+  void emitStackmapHeader(MCStreamer &OS);
+
+  /// \brief Emit the function frame record for each function.
+  void emitFunctionFrameRecords(MCStreamer &OS);
+
+  /// \brief Emit the constant pool.
+  void emitConstantPoolEntries(MCStreamer &OS);
+
+  /// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
+  void emitCallsiteEntries(MCStreamer &OS);
+
+  void print(raw_ostream &OS);
+  void debug() { print(dbgs()); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKMAPS_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/StackProtector.h b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
new file mode 100644
index 0000000..72de212
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/StackProtector.h
@@ -0,0 +1,138 @@
+//===- StackProtector.h - Stack Protector Insertion -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts stack protectors into functions which need them. A variable
+// with a random value in it is stored onto the stack before the local variables
+// are allocated. Upon exiting the block, the stored value is checked. If it's
+// changed, then there was some sort of violation and the program aborts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKPROTECTOR_H
+#define LLVM_CODEGEN_STACKPROTECTOR_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class BasicBlock;
+class DominatorTree;
+class Function;
+class Instruction;
+class Module;
+class TargetLoweringBase;
+class TargetMachine;
+class Type;
+
+class StackProtector : public FunctionPass {
+public:
+  /// SSPLayoutKind.  Stack Smashing Protection (SSP) rules require that
+  /// vulnerable stack allocations are located close to the stack protector.
+  enum SSPLayoutKind {
+    SSPLK_None,       ///< Did not trigger a stack protector.  No effect on data
+                      ///< layout.
+    SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size.  Closest
+                      ///< to the stack protector.
+    SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
+                      ///< to the stack protector.
+    SSPLK_AddrOf      ///< The address of this allocation is exposed and
+                      ///< triggered protection.  3rd closest to the protector.
+  };
+
+  /// A mapping of AllocaInsts to their required SSP layout.
+  using SSPLayoutMap = ValueMap<const AllocaInst *, SSPLayoutKind>;
+
+private:
+  const TargetMachine *TM = nullptr;
+
+  /// TLI - Keep a pointer of a TargetLowering to consult for determining
+  /// target type sizes.
+  const TargetLoweringBase *TLI = nullptr;
+  Triple Trip;
+
+  Function *F;
+  Module *M;
+
+  DominatorTree *DT;
+
+  /// Layout - Mapping of allocations to the required SSPLayoutKind.
+  /// StackProtector analysis will update this map when determining if an
+  /// AllocaInst triggers a stack protector.
+  SSPLayoutMap Layout;
+
+  /// \brief The minimum size of buffers that will receive stack smashing
+  /// protection when -fstack-protector is used.
+  unsigned SSPBufferSize = 0;
+
+  /// VisitedPHIs - The set of PHI nodes visited when determining
+  /// if a variable's reference has been taken.  This set
+  /// is maintained to ensure we don't visit the same PHI node multiple
+  /// times.
+  SmallPtrSet<const PHINode *, 16> VisitedPHIs;
+
+  // A prologue is generated.
+  bool HasPrologue = false;
+
+  // IR checking code is generated.
+  bool HasIRCheck = false;
+
+  /// InsertStackProtectors - Insert code into the prologue and epilogue of
+  /// the function.
+  ///
+  ///  - The prologue code loads and stores the stack guard onto the stack.
+  ///  - The epilogue checks the value stored in the prologue against the
+  ///    original value. It calls __stack_chk_fail if they differ.
+  bool InsertStackProtectors();
+
+  /// CreateFailBB - Create a basic block to jump to when the stack protector
+  /// check fails.
+  BasicBlock *CreateFailBB();
+
+  /// ContainsProtectableArray - Check whether the type either is an array or
+  /// contains an array of sufficient size so that we need stack protectors
+  /// for it.
+  /// \param [out] IsLarge is set to true if a protectable array is found and
+  /// it is "large" ( >= ssp-buffer-size).  In the case of a structure with
+  /// multiple arrays, this gets set if any of them is large.
+  bool ContainsProtectableArray(Type *Ty, bool &IsLarge, bool Strong = false,
+                                bool InStruct = false) const;
+
+  /// \brief Check whether a stack allocation has its address taken.
+  bool HasAddressTaken(const Instruction *AI);
+
+  /// RequiresStackProtector - Check whether or not this function needs a
+  /// stack protector based upon the stack protector level.
+  bool RequiresStackProtector();
+
+public:
+  static char ID; // Pass identification, replacement for typeid.
+
+  StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
+    initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  SSPLayoutKind getSSPLayout(const AllocaInst *AI) const;
+
+  // Return true if StackProtector is supposed to be handled by SelectionDAG.
+  bool shouldEmitSDCheck(const BasicBlock &BB) const;
+
+  void adjustForColoring(const AllocaInst *From, const AllocaInst *To);
+
+  bool runOnFunction(Function &Fn) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKPROTECTOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h b/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h
new file mode 100644
index 0000000..be6562c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TailDuplicator.h
@@ -0,0 +1,128 @@
+//===- llvm/CodeGen/TailDuplicator.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TailDuplicator class. Used by the
+// TailDuplication pass and MachineBlockPlacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TAILDUPLICATOR_H
+#define LLVM_CODEGEN_TAILDUPLICATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineInstr;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+
+/// Utility class to perform tail duplication.
+class TailDuplicator {
+  const TargetInstrInfo *TII;
+  const TargetRegisterInfo *TRI;
+  const MachineBranchProbabilityInfo *MBPI;
+  const MachineModuleInfo *MMI;
+  MachineRegisterInfo *MRI;
+  MachineFunction *MF;
+  bool PreRegAlloc;
+  bool LayoutMode;
+  unsigned TailDupSize;
+
+  // A list of virtual registers for which to update SSA form.
+  SmallVector<unsigned, 16> SSAUpdateVRs;
+
+  // For each virtual register in SSAUpdateVals keep a list of source virtual
+  // registers.
+  using AvailableValsTy = std::vector<std::pair<MachineBasicBlock *, unsigned>>;
+
+  DenseMap<unsigned, AvailableValsTy> SSAUpdateVals;
+
+public:
+  /// Prepare to run on a specific machine function.
+  /// @param MF - Function that will be processed
+  /// @param PreRegAlloc - true if used before register allocation
+  /// @param MBPI - Branch Probability Info. Used to propagate correct
+  ///     probabilities when modifying the CFG.
+  /// @param LayoutMode - When true, don't use the existing layout to make
+  ///     decisions.
+  /// @param TailDupSize - Maximum size of blocks to tail-duplicate; a value
+  ///     of zero means use the command line value TailDupSize.
+  void initMF(MachineFunction &MF, bool PreRegAlloc,
+              const MachineBranchProbabilityInfo *MBPI,
+              bool LayoutMode, unsigned TailDupSize = 0);
+
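+  /// Duplicate eligible tail blocks into their predecessors; typically
+  /// called once after initMF(). Returns true if any block was duplicated.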
+  bool tailDuplicateBlocks();
+  static bool isSimpleBB(MachineBasicBlock *TailBB);
+  bool shouldTailDuplicate(bool IsSimple, MachineBasicBlock &TailBB);
+
+  /// Returns true if TailBB can successfully be duplicated into PredBB
+  bool canTailDuplicate(MachineBasicBlock *TailBB, MachineBasicBlock *PredBB);
+
+  /// Tail duplicate a single basic block into its predecessors, and then clean
+  /// up.
+  /// If \p DuplicatedPreds is not null, it will be updated to contain the
+  /// list of predecessors that received a copy of \p MBB.
+  /// If \p RemovalCallback is non-null, it will be called before MBB is
+  /// deleted.
+  bool tailDuplicateAndUpdate(
+      bool IsSimple, MachineBasicBlock *MBB,
+      MachineBasicBlock *ForcedLayoutPred,
+      SmallVectorImpl<MachineBasicBlock*> *DuplicatedPreds = nullptr,
+      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
+
+private:
+  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;
+
+  void addSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
+                         MachineBasicBlock *BB);
+  void processPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
+                  MachineBasicBlock *PredBB,
+                  DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
+                  SmallVectorImpl<std::pair<unsigned, RegSubRegPair>> &Copies,
+                  const DenseSet<unsigned> &UsedByPhi, bool Remove);
+  void duplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB,
+                            MachineBasicBlock *PredBB,
+                            DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
+                            const DenseSet<unsigned> &UsedByPhi);
+  void updateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
+                            SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                            SmallSetVector<MachineBasicBlock *, 8> &Succs);
+  bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
+  bool duplicateSimpleBB(MachineBasicBlock *TailBB,
+                         SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                         const DenseSet<unsigned> &RegsUsedByPhi,
+                         SmallVectorImpl<MachineInstr *> &Copies);
+  bool tailDuplicate(bool IsSimple,
+                     MachineBasicBlock *TailBB,
+                     MachineBasicBlock *ForcedLayoutPred,
+                     SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                     SmallVectorImpl<MachineInstr *> &Copies);
+  void appendCopies(MachineBasicBlock *MBB,
+                 SmallVectorImpl<std::pair<unsigned,RegSubRegPair>> &CopyInfos,
+                 SmallVectorImpl<MachineInstr *> &Copies);
+
+  void removeDeadBlock(
+      MachineBasicBlock *MBB,
+      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TAILDUPLICATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h b/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h
new file mode 100644
index 0000000..7d138f5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetCallingConv.h
@@ -0,0 +1,204 @@
+//===-- llvm/CodeGen/TargetCallingConv.h - Calling Convention ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETCALLINGCONV_H
+#define LLVM_CODEGEN_TARGETCALLINGCONV_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+
+namespace llvm {
+namespace ISD {
+
+  struct ArgFlagsTy {
+  private:
+    unsigned IsZExt : 1;     ///< Zero extended
+    unsigned IsSExt : 1;     ///< Sign extended
+    unsigned IsInReg : 1;    ///< Passed in register
+    unsigned IsSRet : 1;     ///< Hidden struct-ret ptr
+    unsigned IsByVal : 1;    ///< Struct passed by value
+    unsigned IsNest : 1;     ///< Nested fn static chain
+    unsigned IsReturned : 1; ///< Always returned
+    unsigned IsSplit : 1;
+    unsigned IsInAlloca : 1;   ///< Passed with inalloca
+    unsigned IsSplitEnd : 1;   ///< Last part of a split
+    unsigned IsSwiftSelf : 1;  ///< Swift self parameter
+    unsigned IsSwiftError : 1; ///< Swift error parameter
+    unsigned IsHva : 1;        ///< HVA (Homogeneous Vector Aggregate) field
+    unsigned IsHvaStart : 1;   ///< HVA structure start
+    unsigned IsSecArgPass : 1; ///< Second argument
+    unsigned ByValAlign : 4;   ///< Log 2 of byval alignment
+    unsigned OrigAlign : 5;    ///< Log 2 of original alignment
+    unsigned IsInConsecutiveRegsLast : 1;
+    unsigned IsInConsecutiveRegs : 1;
+    unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
+
+    unsigned ByValSize; ///< Byval struct size
+
+  public:
+    ArgFlagsTy()
+        : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsNest(0),
+          IsReturned(0), IsSplit(0), IsInAlloca(0), IsSplitEnd(0),
+          IsSwiftSelf(0), IsSwiftError(0), IsHva(0), IsHvaStart(0),
+          IsSecArgPass(0), ByValAlign(0), OrigAlign(0),
+          IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
+          IsCopyElisionCandidate(0), ByValSize(0) {
+      static_assert(sizeof(*this) == 2 * sizeof(unsigned), "flags are too big");
+    }
+
+    bool isZExt() const { return IsZExt; }
+    void setZExt() { IsZExt = 1; }
+
+    bool isSExt() const { return IsSExt; }
+    void setSExt() { IsSExt = 1; }
+
+    bool isInReg() const { return IsInReg; }
+    void setInReg() { IsInReg = 1; }
+
+    bool isSRet() const { return IsSRet; }
+    void setSRet() { IsSRet = 1; }
+
+    bool isByVal() const { return IsByVal; }
+    void setByVal() { IsByVal = 1; }
+
+    bool isInAlloca() const { return IsInAlloca; }
+    void setInAlloca() { IsInAlloca = 1; }
+
+    bool isSwiftSelf() const { return IsSwiftSelf; }
+    void setSwiftSelf() { IsSwiftSelf = 1; }
+
+    bool isSwiftError() const { return IsSwiftError; }
+    void setSwiftError() { IsSwiftError = 1; }
+
+    bool isHva() const { return IsHva; }
+    void setHva() { IsHva = 1; }
+
+    bool isHvaStart() const { return IsHvaStart; }
+    void setHvaStart() { IsHvaStart = 1; }
+
+    bool isSecArgPass() const { return IsSecArgPass; }
+    void setSecArgPass() { IsSecArgPass = 1; }
+
+    bool isNest() const { return IsNest; }
+    void setNest() { IsNest = 1; }
+
+    bool isReturned() const { return IsReturned; }
+    void setReturned() { IsReturned = 1; }
+
+    bool isInConsecutiveRegs()  const { return IsInConsecutiveRegs; }
+    void setInConsecutiveRegs() { IsInConsecutiveRegs = 1; }
+
+    bool isInConsecutiveRegsLast() const { return IsInConsecutiveRegsLast; }
+    void setInConsecutiveRegsLast() { IsInConsecutiveRegsLast = 1; }
+
+    bool isSplit()   const { return IsSplit; }
+    void setSplit()  { IsSplit = 1; }
+
+    bool isSplitEnd()   const { return IsSplitEnd; }
+    void setSplitEnd()  { IsSplitEnd = 1; }
+
+    bool isCopyElisionCandidate()  const { return IsCopyElisionCandidate; }
+    void setCopyElisionCandidate() { IsCopyElisionCandidate = 1; }
+
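+    // Alignments are stored as log2(Alignment) + 1, so 0 means "unknown";
+    // e.g. setByValAlign(8) stores 4 and getByValAlign() then returns
+    // (1U << 4) / 2 == 8.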
+    unsigned getByValAlign() const { return (1U << ByValAlign) / 2; }
+    void setByValAlign(unsigned A) {
+      ByValAlign = Log2_32(A) + 1;
+      assert(getByValAlign() == A && "bitfield overflow");
+    }
+
+    unsigned getOrigAlign() const { return (1U << OrigAlign) / 2; }
+    void setOrigAlign(unsigned A) {
+      OrigAlign = Log2_32(A) + 1;
+      assert(getOrigAlign() == A && "bitfield overflow");
+    }
+
+    unsigned getByValSize() const { return ByValSize; }
+    void setByValSize(unsigned S) { ByValSize = S; }
+  };
+
+  /// InputArg - This struct carries flags and type information about a
+  /// single incoming (formal) argument or incoming (from the perspective
+  /// of the caller) return value virtual register.
+  ///
+  struct InputArg {
+    ArgFlagsTy Flags;
+    MVT VT = MVT::Other;
+    EVT ArgVT;
+    bool Used = false;
+
+    /// Index of the original Function's argument.
+    unsigned OrigArgIndex;
+    /// Sentinel value for implicit machine-level input arguments.
+    static const unsigned NoArgIndex = UINT_MAX;
+
+    /// Offset in bytes of current input value relative to the beginning of
+    /// the original argument. E.g. if an argument was split into four 32-bit
+    /// registers, we get four InputArgs with PartOffsets 0, 4, 8 and 12.
+    unsigned PartOffset;
+
+    InputArg() = default;
+    InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
+             unsigned origIdx, unsigned partOffs)
+      : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
+      VT = vt.getSimpleVT();
+      ArgVT = argvt;
+    }
+
+    bool isOrigArg() const {
+      return OrigArgIndex != NoArgIndex;
+    }
+
+    unsigned getOrigArgIndex() const {
+      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+      return OrigArgIndex;
+    }
+  };
+
+  /// OutputArg - This struct carries flags and a value for a
+  /// single outgoing (actual) argument or outgoing (from the perspective
+  /// of the caller) return value virtual register.
+  ///
+  struct OutputArg {
+    ArgFlagsTy Flags;
+    MVT VT;
+    EVT ArgVT;
+
+    /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
+    bool IsFixed = false;
+
+    /// Index of the original Function's argument.
+    unsigned OrigArgIndex;
+
+    /// Offset in bytes of current output value relative to the beginning of
+    /// the original argument. E.g. if an argument was split into four 32-bit
+    /// registers, we get four OutputArgs with PartOffsets 0, 4, 8 and 12.
+    unsigned PartOffset;
+
+    OutputArg() = default;
+    OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed,
+              unsigned origIdx, unsigned partOffs)
+      : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+        PartOffset(partOffs) {
+      VT = vt.getSimpleVT();
+      ArgVT = argvt;
+    }
+  };
+
+} // end namespace ISD
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETCALLINGCONV_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
new file mode 100644
index 0000000..61f1cf0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetFrameLowering.h
@@ -0,0 +1,348 @@
+//===-- llvm/CodeGen/TargetFrameLowering.h ----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETFRAMELOWERING_H
+#define LLVM_CODEGEN_TARGETFRAMELOWERING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include <utility>
+#include <vector>
+
+namespace llvm {
+  class BitVector;
+  class CalleeSavedInfo;
+  class MachineFunction;
+  class RegScavenger;
+
+/// Information about stack frame layout on the target.  It holds the direction
+/// of stack growth, the known stack alignment on entry to each function, and
+/// the offset to the locals area.
+///
+/// The offset to the local area is the offset from the stack pointer on
+/// function entry to the first location where function data (local variables,
+/// spill locations) can be stored.
+class TargetFrameLowering {
+public:
+  enum StackDirection {
+    StackGrowsUp,        // Adding to the stack increases the stack address
+    StackGrowsDown       // Adding to the stack decreases the stack address
+  };
+
+  // Maps a callee saved register to a stack slot with a fixed offset.
+  struct SpillSlot {
+    unsigned Reg;
+    int Offset; // Offset relative to stack pointer on function entry.
+  };
+private:
+  StackDirection StackDir;
+  unsigned StackAlignment;
+  unsigned TransientStackAlignment;
+  int LocalAreaOffset;
+  bool StackRealignable;
+public:
+  TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
+                      unsigned TransAl = 1, bool StackReal = true)
+    : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
+      LocalAreaOffset(LAO), StackRealignable(StackReal) {}
+
+  virtual ~TargetFrameLowering();
+
+  // These methods return information that describes the abstract stack layout
+  // of the target machine.
+
+  /// getStackGrowthDirection - Return the direction the stack grows
+  ///
+  StackDirection getStackGrowthDirection() const { return StackDir; }
+
+  /// getStackAlignment - This method returns the number of bytes to which the
+  /// stack pointer must be aligned on entry to a function.  Typically, this
+  /// is the largest alignment for any data object in the target.
+  ///
+  unsigned getStackAlignment() const { return StackAlignment; }
+
+  /// alignSPAdjust - This method aligns the stack adjustment to the correct
+  /// alignment.
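+  /// For example, with a 16-byte stack alignment, an adjustment of -12 is
+  /// rounded to -16 and an adjustment of 20 is rounded up to 32.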
+  ///
+  int alignSPAdjust(int SPAdj) const {
+    if (SPAdj < 0) {
+      SPAdj = -alignTo(-SPAdj, StackAlignment);
+    } else {
+      SPAdj = alignTo(SPAdj, StackAlignment);
+    }
+    return SPAdj;
+  }
+
+  /// getTransientStackAlignment - This method returns the number of bytes to
+  /// which the stack pointer must be aligned at all times, even between
+  /// calls.
+  ///
+  unsigned getTransientStackAlignment() const {
+    return TransientStackAlignment;
+  }
+
+  /// isStackRealignable - This method returns whether the stack can be
+  /// realigned.
+  bool isStackRealignable() const {
+    return StackRealignable;
+  }
+
+  /// Return the skew that has to be applied to stack alignment under
+  /// certain conditions (e.g. stack was adjusted before function \p MF
+  /// was called).
+  virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;
+
+  /// getOffsetOfLocalArea - This method returns the offset of the local area
+  /// from the stack pointer on entrance to a function.
+  ///
+  int getOffsetOfLocalArea() const { return LocalAreaOffset; }
+
+  /// isFPCloseToIncomingSP - Return true if the frame pointer is close to
+  /// the incoming stack pointer, false if it is close to the post-prologue
+  /// stack pointer.
+  virtual bool isFPCloseToIncomingSP() const { return true; }
+
+  /// assignCalleeSavedSpillSlots - Allows target to override spill slot
+  /// assignment logic.  If implemented, assignCalleeSavedSpillSlots() should
+  /// assign frame slots to all CSI entries and return true.  If this method
+  /// returns false, spill slots will be assigned using generic implementation.
+  /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of
+  /// CSI.
+  virtual bool
+  assignCalleeSavedSpillSlots(MachineFunction &MF,
+                              const TargetRegisterInfo *TRI,
+                              std::vector<CalleeSavedInfo> &CSI) const {
+    return false;
+  }
+
+  /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
+  /// pairs that contains an entry for each callee saved register that must be
+  /// spilled to a particular stack location if it is spilled.
+  ///
+  /// Each entry in this array contains a <register,offset> pair, indicating the
+  /// fixed offset from the incoming stack pointer that each register should be
+  /// spilled at. If a register is not listed here, the code generator is
+  /// allowed to spill it anywhere it chooses.
+  ///
+  virtual const SpillSlot *
+  getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+    NumEntries = 0;
+    return nullptr;
+  }
+
+  /// targetHandlesStackFrameRounding - Returns true if the target is
+  /// responsible for rounding up the stack frame (probably at emitPrologue
+  /// time).
+  virtual bool targetHandlesStackFrameRounding() const {
+    return false;
+  }
+
+  /// Returns true if the target will correctly handle shrink wrapping.
+  virtual bool enableShrinkWrapping(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the stack slot holes in the fixed and callee-save stack
+  /// area should be used when allocating other stack locations to reduce stack
+  /// size.
+  virtual bool enableStackSlotScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+  /// the function.
+  virtual void emitPrologue(MachineFunction &MF,
+                            MachineBasicBlock &MBB) const = 0;
+  virtual void emitEpilogue(MachineFunction &MF,
+                            MachineBasicBlock &MBB) const = 0;
+
+  /// Replace a StackProbe stub (if any) with the actual probe code inline
+  virtual void inlineStackProbe(MachineFunction &MF,
+                                MachineBasicBlock &PrologueMBB) const {}
+
+  /// Adjust the prologue to have the function use segmented stacks. This works
+  /// by adding a check even before the "normal" function prologue.
+  virtual void adjustForSegmentedStacks(MachineFunction &MF,
+                                        MachineBasicBlock &PrologueMBB) const {}
+
+  /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
+  /// the assembly prologue to explicitly handle the stack.
+  virtual void adjustForHiPEPrologue(MachineFunction &MF,
+                                     MachineBasicBlock &PrologueMBB) const {}
+
+  /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of store instructions via
+  /// storeRegToStackSlot(). Returns false otherwise.
+  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator MI,
+                                        const std::vector<CalleeSavedInfo> &CSI,
+                                         const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+  /// saved registers and returns true if it isn't possible / profitable to do
+  /// so by issuing a series of load instructions via loadRegFromStackSlot().
+  /// If it returns true, and any of the registers in CSI is not restored,
+  /// it sets the corresponding Restored flag in CSI to false.
+  /// Returns false otherwise.
+  virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MI,
+                                           std::vector<CalleeSavedInfo> &CSI,
+                                        const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// Return true if the target needs to disable frame pointer elimination.
+  virtual bool noFramePointerElim(const MachineFunction &MF) const;
+
+  /// hasFP - Return true if the specified function should have a dedicated
+  /// frame pointer register. For most targets this is true only if the function
+  /// has variable sized allocas or if frame pointer elimination is disabled.
+  virtual bool hasFP(const MachineFunction &MF) const = 0;
+
+  /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+  /// not required, we reserve argument space for call sites in the function
+  /// immediately on entry to the current function. This eliminates the need for
+  /// add/sub sp brackets around call sites. Returns true if the call frame is
+  /// included as part of the stack frame.
+  virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
+    return !hasFP(MF);
+  }
+
+  /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
+  /// call frame pseudo ops before doing frame index elimination. This is
+  /// possible only when frame index references between the pseudos won't
+  /// need adjusting for the call frame adjustments. Normally, that's true
+  /// if the function has a reserved call frame or a frame pointer. Some
+  /// targets (Thumb2, for example) may have more complicated criteria,
+  /// however, and can override this behavior.
+  virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+    return hasReservedCallFrame(MF) || hasFP(MF);
+  }
+
+  // needsFrameIndexResolution - Do we need to perform FI resolution for
+  // this function. Normally, this is required only when the function
+  // has any stack objects. However, targets may want to override this.
+  virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;
+
+  /// getFrameIndexReference - This method should return the base register
+  /// and offset used to reference a frame index location. The offset is
+  /// returned directly, and the base register is returned via FrameReg.
+  virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
+                                     unsigned &FrameReg) const;
+
+  /// Same as \c getFrameIndexReference, except that the stack pointer (as
+  /// opposed to the frame pointer) will be the preferred value for \p
+  /// FrameReg. This is generally used for emitting statepoint or EH tables that
+  /// use offsets from RSP.  If \p IgnoreSPUpdates is true, the returned
+  /// offset is only guaranteed to be valid with respect to the value of SP at
+  /// the end of the prologue.
+  virtual int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
+                                             unsigned &FrameReg,
+                                             bool IgnoreSPUpdates) const {
+    // Always safe to dispatch to getFrameIndexReference.
+    return getFrameIndexReference(MF, FI, FrameReg);
+  }
+
+  /// This method determines which of the registers reported by
+  /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
+  /// The default implementation populates the \p SavedRegs bitset with
+  /// all registers which are modified in the function; targets may override
+  /// this function to save additional registers.
+  /// This method also sets up the register scavenger, ensuring there is a
+  /// free register or a frame index available.
+  virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+                                    RegScavenger *RS = nullptr) const;
+
+  /// processFunctionBeforeFrameFinalized - This method is called immediately
+  /// before the specified function's frame layout (MF.getFrameInfo()) is
+  /// finalized.  Once the frame is finalized, MO_FrameIndex operands are
+  /// replaced with direct constants.  This method is optional.
+  ///
+  virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+                                             RegScavenger *RS = nullptr) const {
+  }
+
+  virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const {
+    report_fatal_error("WinEH not implemented for this target");
+  }
+
+  /// This method is called during prolog/epilog code insertion to eliminate
+  /// call frame setup and destroy pseudo instructions (but only if the Target
+  /// is using them).  It is responsible for eliminating these instructions,
+  /// replacing them with concrete instructions.  This method need only be
+  /// implemented if using call frame setup/destroy pseudo instructions.
+  /// Returns an iterator pointing to the instruction after the replaced one.
+  virtual MachineBasicBlock::iterator
+  eliminateCallFramePseudoInstr(MachineFunction &MF,
+                                MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const {
+    llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
+                     "target!");
+  }
+
+
+  /// Order the symbols in the local stack frame.
+  /// The list of objects that we want to order is in \p objectsToAllocate as
+  /// indices into the MachineFrameInfo. The array can be reordered in any way
+  /// upon return. The contents of the array, however, may not be modified (i.e.
+  /// only their order may be changed).
+  /// By default, just maintain the original order.
+  virtual void
+  orderFrameObjects(const MachineFunction &MF,
+                    SmallVectorImpl<int> &objectsToAllocate) const {
+  }
+
+  /// Check whether or not the given \p MBB can be used as a prologue
+  /// for the target.
+  /// The prologue will be inserted first in this basic block.
+  /// This method is used by the shrink-wrapping pass to decide if
+  /// \p MBB will be correctly handled by the target.
+  /// If the target enables shrink-wrapping without overriding this method,
+  /// each basic block is assumed to be a valid prologue.
+  virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const {
+    return true;
+  }
+
+  /// Check whether or not the given \p MBB can be used as an epilogue
+  /// for the target.
+  /// The epilogue will be inserted before the first terminator of that block.
+  /// This method is used by the shrink-wrapping pass to decide if
+  /// \p MBB will be correctly handled by the target.
+  /// If the target enables shrink-wrapping without overriding this method,
+  /// each basic block is assumed to be a valid epilogue.
+  virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
+    return true;
+  }
+
+  /// Check if given function is safe for not having callee saved registers.
+  /// This is used when interprocedural register allocation is enabled.
+  static bool isSafeForNoCSROpt(const Function &F) {
+    if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
+        !F.hasFnAttribute(Attribute::NoRecurse))
+      return false;
+    // The function should not be optimized as a tail call.
+    for (const User *U : F.users())
+      if (auto CS = ImmutableCallSite(U))
+        if (CS.isTailCall())
+          return false;
+    return true;
+  }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
new file mode 100644
index 0000000..5c2a530
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetInstrInfo.h
@@ -0,0 +1,1710 @@
+//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target machine instruction set to the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETINSTRINFO_H
+#define LLVM_TARGET_TARGETINSTRINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/None.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class DFAPacketizer;
+class InstrItineraryData;
+class LiveIntervals;
+class LiveVariables;
+class MachineMemOperand;
+class MachineRegisterInfo;
+class MCAsmInfo;
+class MCInst;
+struct MCSchedModel;
+class Module;
+class ScheduleDAG;
+class ScheduleHazardRecognizer;
+class SDNode;
+class SelectionDAG;
+class RegScavenger;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSchedModel;
+class TargetSubtargetInfo;
+
+template <class T> class SmallVectorImpl;
+
+//---------------------------------------------------------------------------
+///
+/// TargetInstrInfo - Interface to description of machine instruction set
+///
+class TargetInstrInfo : public MCInstrInfo {
+public:
+  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
+                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
+      : CallFrameSetupOpcode(CFSetupOpcode),
+        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
+        ReturnOpcode(ReturnOpcode) {}
+  TargetInstrInfo(const TargetInstrInfo &) = delete;
+  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
+  virtual ~TargetInstrInfo();
+
+  static bool isGenericOpcode(unsigned Opc) {
+    return Opc <= TargetOpcode::GENERIC_OP_END;
+  }
+
+  /// Given a machine instruction descriptor, returns the register
+  /// class constraint for OpNum, or NULL.
+  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum,
+                                         const TargetRegisterInfo *TRI,
+                                         const MachineFunction &MF) const;
+
+  /// Return true if the instruction is trivially rematerializable, meaning it
+  /// has no side effects and requires no operands that aren't always available.
+  /// This means the only allowed uses are constants and unallocatable physical
+  /// registers so that the instruction's result is independent of the place
+  /// in the function.
+  bool isTriviallyReMaterializable(const MachineInstr &MI,
+                                   AliasAnalysis *AA = nullptr) const {
+    return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+           (MI.getDesc().isRematerializable() &&
+            (isReallyTriviallyReMaterializable(MI, AA) ||
+             isReallyTriviallyReMaterializableGeneric(MI, AA)));
+  }
+
+protected:
+  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+  /// set, this hook lets the target specify whether the instruction is actually
+  /// trivially rematerializable, taking into consideration its operands. This
+  /// predicate must return false if the instruction has any side effects other
+  /// than producing a value, or if it requires any address registers that are
+  /// not always available.
+  /// The requirements must be checked as stated in isTriviallyReMaterializable().
+  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
+                                                 AliasAnalysis *AA) const {
+    return false;
+  }
+
+  /// This method commutes the operands of the given machine instruction MI.
+  /// The operands to be commuted are specified by their indices OpIdx1 and
+  /// OpIdx2.
+  ///
+  /// If a target has any instructions that are commutable but require
+  /// converting to different instructions or making non-trivial changes
+  /// to commute them, this method can be overloaded to do that.
+  /// The default implementation simply swaps the commutable operands.
+  ///
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned.
+  ///
+  /// Do not call this method for a non-commutable instruction.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+                                               unsigned OpIdx1,
+                                               unsigned OpIdx2) const;
+
+  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
+  /// operand indices to (ResultIdx1, ResultIdx2).
+  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
+  /// predefined to some indices or be undefined (designated by the special
+  /// value 'CommuteAnyOperandIndex').
+  /// The predefined result indices cannot be re-defined.
+  /// The function returns true iff after the result pair redefinition
+  /// the fixed result pair is equal to or equivalent to the source pair of
+  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
+  /// the pairs (x,y) and (y,x) are equivalent.
+  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
+                                   unsigned CommutableOpIdx1,
+                                   unsigned CommutableOpIdx2);
+
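+  // A minimal sketch of how a target override of findCommutedOpIndices would
+  // typically use fixCommutedOpIndices; the class name MyTargetInstrInfo and
+  // the choice of operands 1 and 2 are hypothetical:
+  //
+  //   bool MyTargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
+  //                                                 unsigned &SrcOpIdx1,
+  //                                                 unsigned &SrcOpIdx2) const {
+  //     // Fails if the caller pre-set an index other than 1 or 2.
+  //     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
+  //   }
+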
+private:
+  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+  /// set and the target hook isReallyTriviallyReMaterializable returns false,
+  /// this function does target-independent tests to determine if the
+  /// instruction is really trivially rematerializable.
+  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
+                                                AliasAnalysis *AA) const;
+
+public:
+  /// These methods return the opcode of the frame setup/destroy instructions
+  /// if they exist (-1 otherwise).  Some targets use pseudo instructions in
+  /// order to abstract away the difference between operating with a frame
+  /// pointer and operating without, through the use of these two instructions.
+  ///
+  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
+  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
+
+  /// Returns true if the argument is a frame pseudo instruction.
+  bool isFrameInstr(const MachineInstr &I) const {
+    return I.getOpcode() == getCallFrameSetupOpcode() ||
+           I.getOpcode() == getCallFrameDestroyOpcode();
+  }
+
+  /// Returns true if the argument is a frame setup pseudo instruction.
+  bool isFrameSetup(const MachineInstr &I) const {
+    return I.getOpcode() == getCallFrameSetupOpcode();
+  }
+
+  /// Returns the size of the frame associated with the given frame
+  /// instruction. For a frame setup instruction this is the frame space set up
+  /// after the instruction. For a frame destroy instruction this is the frame
+  /// freed by the caller.
+  /// Note, in some cases a call frame (or a part of it) may be prepared prior
+  /// to the frame setup instruction. It occurs in the calls that involve
+  /// inalloca arguments. This function reports only the size of the frame part
+  /// that is set up between the frame setup and destroy pseudo instructions.
+  int64_t getFrameSize(const MachineInstr &I) const {
+    assert(isFrameInstr(I) && "Not a frame instruction");
+    assert(I.getOperand(0).getImm() >= 0);
+    return I.getOperand(0).getImm();
+  }
+
+  /// Returns the total frame size, which is made up of the space set up inside
+  /// the pair of frame start-stop instructions and the space that is set up
+  /// prior to the pair.
+  int64_t getFrameTotalSize(const MachineInstr &I) const {
+    if (isFrameSetup(I)) {
+      assert(I.getOperand(1).getImm() >= 0 &&
+             "Frame size must not be negative");
+      return getFrameSize(I) + I.getOperand(1).getImm();
+    }
+    return getFrameSize(I);
+  }
+
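+  // For illustration, a hedged sketch of summing the call-frame space set up
+  // in a block with these helpers; TII and the enclosing pass context are
+  // assumed:
+  //
+  //   int64_t CallFrameBytes = 0;
+  //   for (const MachineInstr &MI : MBB)
+  //     if (TII->isFrameSetup(MI))
+  //       CallFrameBytes += TII->getFrameTotalSize(MI);
+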
+  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
+  unsigned getReturnOpcode() const { return ReturnOpcode; }
+
+  /// Returns the actual stack pointer adjustment made by an instruction
+  /// as part of a call sequence. By default, only call frame setup/destroy
+  /// instructions adjust the stack, but targets may want to override this
+  /// to enable more fine-grained adjustment, or adjust by a different value.
+  virtual int getSPAdjust(const MachineInstr &MI) const;
+
+  /// Return true if the instruction is a "coalescable" extension instruction.
+  /// That is, it's like a copy where it's legal for the source to overlap the
+  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
+  /// expected the pre-extension value is available as a subreg of the result
+  /// register. This also returns the sub-register index in SubIdx.
+  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+                                     unsigned &DstReg, unsigned &SubIdx) const {
+    return false;
+  }
+
+  /// If the specified machine instruction is a direct
+  /// load from a stack slot, return the virtual or physical register number of
+  /// the destination along with the FrameIndex of the loaded stack slot.  If
+  /// not, return 0.  This predicate must return 0 if the instruction has
+  /// any side effects other than loading from the stack slot.
+  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
+                                       int &FrameIndex) const {
+    return 0;
+  }
+
+  /// Check for post-frame ptr elimination stack locations as well.
+  /// This uses a heuristic, so it isn't reliable for correctness.
+  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
+                                             int &FrameIndex) const {
+    return 0;
+  }
+
+  /// If the specified machine instruction has a load from a stack slot,
+  /// return true along with the FrameIndex of the loaded stack slot and the
+  /// machine mem operand containing the reference.
+  /// If not, return false.  Unlike isLoadFromStackSlot, this returns true for
+  /// any instruction that loads from the stack.  This is just a hint, as some
+  /// cases may be missed.
+  virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
+                                    const MachineMemOperand *&MMO,
+                                    int &FrameIndex) const;
+
+  /// If the specified machine instruction is a direct
+  /// store to a stack slot, return the virtual or physical register number of
+  /// the source reg along with the FrameIndex of the stack slot stored to.  If
+  /// not, return 0.  This predicate must return 0 if the instruction has
+  /// any side effects other than storing to the stack slot.
+  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
+                                      int &FrameIndex) const {
+    return 0;
+  }
+
+  /// Check for post-frame ptr elimination stack locations as well.
+  /// This uses a heuristic, so it isn't reliable for correctness.
+  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
+                                            int &FrameIndex) const {
+    return 0;
+  }
+
+  /// If the specified machine instruction has a store to a stack slot,
+  /// return true along with the FrameIndex of the stack slot stored to and the
+  /// machine mem operand containing the reference.
+  /// If not, return false.  Unlike isStoreToStackSlot,
+  /// this returns true for any instruction that stores to the
+  /// stack.  This is just a hint, as some cases may be missed.
+  virtual bool hasStoreToStackSlot(const MachineInstr &MI,
+                                   const MachineMemOperand *&MMO,
+                                   int &FrameIndex) const;
+
+  /// Return true if the specified machine instruction
+  /// is a copy of one stack slot to another and has no other effect.
+  /// Provide the identity of the two frame indices.
+  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
+                               int &SrcFrameIndex) const {
+    return false;
+  }
+
+  /// Compute the size in bytes and offset within a stack slot of a spilled
+  /// register or subregister.
+  ///
+  /// \param [out] Size in bytes of the spilled value.
+  /// \param [out] Offset in bytes within the stack slot.
+  /// \returns true if both Size and Offset are successfully computed.
+  ///
+  /// Not all subregisters have computable spill slots. For example,
+  /// subregisters may not be byte-sized, and a pair of discontiguous
+  /// subregisters has no single offset.
+  ///
+  /// Targets with nontrivial big-endian implementations may need to override
+  /// this, particularly to support spilled vector registers.
+  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
+                                 unsigned &Size, unsigned &Offset,
+                                 const MachineFunction &MF) const;
+
+  /// Returns the size in bytes of the specified MachineInstr, or ~0U
+  /// when this function is not implemented by a target.
+  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
+    return ~0U;
+  }
+
+  /// Return true if the instruction is as cheap as a move instruction.
+  ///
+  /// Targets for different architectures need to override this, and the
+  /// answer can be further tuned for particular micro-architectures.
+  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
+    return MI.isAsCheapAsAMove();
+  }
+
+  /// Return true if the instruction should be sunk by MachineSink.
+  ///
+  /// MachineSink determines on its own whether the instruction is safe to sink;
+  /// this gives the target a hook to override the default behavior with regards
+  /// to which instructions should be sunk.
+  virtual bool shouldSink(const MachineInstr &MI) const { return true; }
+
+  /// Re-issue the specified 'original' instruction at the
+  /// specific location targeting a new destination register.
+  /// The register in Orig->getOperand(0).getReg() will be substituted by
+  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+  /// SubIdx.
+  virtual void reMaterialize(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator MI, unsigned DestReg,
+                             unsigned SubIdx, const MachineInstr &Orig,
+                             const TargetRegisterInfo &TRI) const;
+
+  /// \brief Clones instruction or the whole instruction bundle \p Orig and
+  /// insert into \p MBB before \p InsertBefore. The target may update operands
+  /// that are required to be unique.
+  ///
+  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
+  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator InsertBefore,
+                                  const MachineInstr &Orig) const;
+
+  /// This method must be implemented by targets that
+  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
+  /// may be able to convert a two-address instruction into one or more true
+  /// three-address instructions on demand.  This allows the X86 target (for
+  /// example) to convert ADD and SHL instructions into LEA instructions if they
+  /// would require register copies due to two-addressness.
+  ///
+  /// This method returns a null pointer if the transformation cannot be
+  /// performed, otherwise it returns the last new instruction.
+  ///
+  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+                                              MachineInstr &MI,
+                                              LiveVariables *LV) const {
+    return nullptr;
+  }
+
+  // This constant can be used as an input value of operand index passed to
+  // the method findCommutedOpIndices() to tell the method that the
+  // corresponding operand index is not pre-defined and that the method
+  // can pick any commutable operand.
+  static const unsigned CommuteAnyOperandIndex = ~0U;
+
+  /// This method commutes the operands of the given machine instruction MI.
+  ///
+  /// The operands to be commuted are specified by their indices OpIdx1 and
+  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
+  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
+  /// any commutable operand. If both arguments are set to
+  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
+  /// operands and commutes them if such operands can be found.
+  ///
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned.
+  ///
+  /// Do not call this method for a non-commutable instruction or
+  /// for non-commutable operands.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+  MachineInstr *
+  commuteInstruction(MachineInstr &MI, bool NewMI = false,
+                     unsigned OpIdx1 = CommuteAnyOperandIndex,
+                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;
+
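+  // A minimal usage sketch, assuming a TargetInstrInfo pointer TII and an
+  // instruction MI known to be commutable; the operand choice is left to the
+  // target:
+  //
+  //   unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
+  //   unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+  //   if (MachineInstr *Commuted =
+  //           TII->commuteInstruction(MI, /*NewMI=*/false, Idx1, Idx2)) {
+  //     // With NewMI == false, MI itself was commuted in place.
+  //   }
+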
+  /// Returns true iff the routine could find two commutable operands in the
+  /// given machine instruction.
+  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
+  /// If any of the INPUT values is set to the special value
+  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
+  /// operand, then returns its index in the corresponding argument.
+  /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
+  /// looks for 2 commutable operands.
+  /// If INPUT values refer to some operands of MI, then the method simply
+  /// returns true if the corresponding operands are commutable and returns
+  /// false otherwise.
+  ///
+  /// For example, calling this method this way:
+  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
+  ///     findCommutedOpIndices(MI, Op1, Op2);
+  /// can be interpreted as a query asking to find an operand that would be
+  /// commutable with operand #1.
+  virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
+                                     unsigned &SrcOpIdx2) const;
+
+  /// A pair composed of a register and a sub-register index.
+  /// Used to give some type checking when modeling Reg:SubReg.
+  struct RegSubRegPair {
+    unsigned Reg;
+    unsigned SubReg;
+
+    RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
+        : Reg(Reg), SubReg(SubReg) {}
+  };
+
+  /// A pair composed of a pair of a register and a sub-register index,
+  /// and another sub-register index.
+  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
+  struct RegSubRegPairAndIdx : RegSubRegPair {
+    unsigned SubIdx;
+
+    RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
+                        unsigned SubIdx = 0)
+        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
+  };
+
+  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
+  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
+  /// flag are not added to this list.
+  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
+  /// two elements:
+  /// - %1:sub1, sub0
+  /// - %2<:0>, sub1
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isRegSequenceLike(). In other words, one has to override
+  /// getRegSequenceLikeInputs for target specific instructions.
+  bool
+  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
+                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
+
+  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
+  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+  /// - %1:sub1, sub0
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
+  /// False otherwise.
+  ///
+  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isExtractSubregLike(). In other words, one has to override
+  /// getExtractSubregLikeInputs for target specific instructions.
+  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+                              RegSubRegPairAndIdx &InputReg) const;
+
+  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
+  /// and \p DefIdx.
+  /// \p [out] BaseReg and \p [out] InsertedReg contain
+  /// the equivalent inputs of INSERT_SUBREG.
+  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+  /// - BaseReg: %0:sub0
+  /// - InsertedReg: %1:sub1, sub3
+  ///
+  /// \returns true if it is possible to build such an input sequence
+  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
+  /// False otherwise.
+  ///
+  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
+  ///
+  /// \note The generic implementation does not provide any support for
+  /// MI.isInsertSubregLike(). In other words, one has to override
+  /// getInsertSubregLikeInputs for target specific instructions.
+  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+                             RegSubRegPair &BaseReg,
+                             RegSubRegPairAndIdx &InsertedReg) const;
+
+  /// Return true if two machine instructions would produce identical values.
+  /// By default, this is only true when the two instructions
+  /// are deemed identical except for defs. If this function is called when the
+  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
+  /// aggressive checks.
+  virtual bool produceSameValue(const MachineInstr &MI0,
+                                const MachineInstr &MI1,
+                                const MachineRegisterInfo *MRI = nullptr) const;
+
+  /// \returns true if a branch instruction with opcode \p BranchOpc
+  /// is capable of jumping to a position \p BrOffset bytes away.
+  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
+                                     int64_t BrOffset) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// \returns The block that branch instruction \p MI jumps to.
+  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// Insert an unconditional indirect branch at the end of \p MBB to \p
+  /// NewDestBB.  \p BrOffset indicates the offset of \p NewDestBB relative to
+  /// the offset of the position to insert the new branch.
+  ///
+  /// \returns The number of bytes added to the block.
+  virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
+                                        MachineBasicBlock &NewDestBB,
+                                        const DebugLoc &DL,
+                                        int64_t BrOffset = 0,
+                                        RegScavenger *RS = nullptr) const {
+    llvm_unreachable("target did not implement");
+  }
+
+  /// Analyze the branching code at the end of MBB, returning
+  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
+  /// implemented for a target).  Upon success, this returns false and returns
+  /// with the following information in various cases:
+  ///
+  /// 1. If this block ends with no branches (it just falls through to its succ)
+  ///    just return false, leaving TBB/FBB null.
+  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
+  ///    the destination block.
+  /// 3. If this block ends with a conditional branch and it falls through to a
+  ///    successor block, it sets TBB to be the branch destination block and
+  ///    fills Cond with a list of operands that evaluate the condition. These
+  ///    operands can be passed to other TargetInstrInfo methods to create new
+  ///    branches.
+  /// 4. If this block ends with a conditional branch followed by an
+  ///    unconditional branch, it returns the 'true' destination in TBB, the
+  ///    'false' destination in FBB, and a list of operands that evaluate the
+  ///    condition.  These operands can be passed to other TargetInstrInfo
+  ///    methods to create new branches.
+  ///
+  /// Note that removeBranch and insertBranch must be implemented to support
+  /// cases where this method returns success.
+  ///
+  /// If AllowModify is true, then this routine is allowed to modify the basic
+  /// block (e.g. delete instructions after the unconditional branch).
+  ///
+  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+  /// before calling this function.
+  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                             MachineBasicBlock *&FBB,
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             bool AllowModify = false) const {
+    return true;
+  }
+
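+  // A minimal sketch of the calling convention, assuming TII and a block MBB;
+  // a false return means the terminators were understood:
+  //
+  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+  //   SmallVector<MachineOperand, 4> Cond;
+  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) {
+  //     if (TBB && !FBB && Cond.empty()) {
+  //       // Case 2: MBB ends in a single unconditional branch to TBB.
+  //     }
+  //   }
+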
+  /// Represents a predicate at the MachineFunction level.  The control flow a
+  /// MachineBranchPredicate represents is:
+  ///
+  ///  Reg = LHS `Predicate` RHS         == ConditionDef
+  ///  if Reg then goto TrueDest else goto FalseDest
+  ///
+  struct MachineBranchPredicate {
+    enum ComparePredicate {
+      PRED_EQ,     // True if two values are equal
+      PRED_NE,     // True if two values are not equal
+      PRED_INVALID // Sentinel value
+    };
+
+    ComparePredicate Predicate = PRED_INVALID;
+    MachineOperand LHS = MachineOperand::CreateImm(0);
+    MachineOperand RHS = MachineOperand::CreateImm(0);
+    MachineBasicBlock *TrueDest = nullptr;
+    MachineBasicBlock *FalseDest = nullptr;
+    MachineInstr *ConditionDef = nullptr;
+
+    /// SingleUseCondition is true if ConditionDef is dead except for the
+    /// branch(es) at the end of the basic block.
+    ///
+    bool SingleUseCondition = false;
+
+    explicit MachineBranchPredicate() = default;
+  };
+
+  /// Analyze the branching code at the end of MBB and parse it into the
+  /// MachineBranchPredicate structure if possible.  Returns false on success
+  /// and true on failure.
+  ///
+  /// If AllowModify is true, then this routine is allowed to modify the basic
+  /// block (e.g. delete instructions after the unconditional branch).
+  ///
+  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
+                                      MachineBranchPredicate &MBP,
+                                      bool AllowModify = false) const {
+    return true;
+  }
+
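+  // Sketch of the intended calling pattern (TII and MBB assumed as above);
+  // a false return means MBP now describes the block's compare and branch:
+  //
+  //   TargetInstrInfo::MachineBranchPredicate MBP;
+  //   if (!TII->analyzeBranchPredicate(MBB, MBP) &&
+  //       MBP.Predicate == TargetInstrInfo::MachineBranchPredicate::PRED_EQ) {
+  //     // Control reaches MBP.TrueDest when MBP.LHS == MBP.RHS.
+  //   }
+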
+  /// Remove the branching code at the end of the specific MBB.
+  /// This is only invoked in cases where AnalyzeBranch returns success. It
+  /// returns the number of instructions that were removed.
+  /// If \p BytesRemoved is non-null, report the change in code size from the
+  /// removed instructions.
+  virtual unsigned removeBranch(MachineBasicBlock &MBB,
+                                int *BytesRemoved = nullptr) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
+  }
+
+  /// Insert branch code into the end of the specified MachineBasicBlock. The
+  /// operands to this method are the same as those returned by AnalyzeBranch.
+  /// This is only invoked in cases where AnalyzeBranch returns success. It
+  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
+  /// report the change in code size from the added instructions.
+  ///
+  /// It is also invoked by tail merging to add unconditional branches in
+  /// cases where AnalyzeBranch doesn't apply because there was no original
+  /// branch to analyze.  At least this much must be implemented, else tail
+  /// merging needs to be disabled.
+  ///
+  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+  /// before calling this function.
+  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                                MachineBasicBlock *FBB,
+                                ArrayRef<MachineOperand> Cond,
+                                const DebugLoc &DL,
+                                int *BytesAdded = nullptr) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
+  }
+
+  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
+                                     MachineBasicBlock *DestBB,
+                                     const DebugLoc &DL,
+                                     int *BytesAdded = nullptr) const {
+    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
+                        BytesAdded);
+  }
+
+  /// Analyze the loop code, returning true if it cannot be understood. Upon
+  /// success, this function returns false and returns information about the
+  /// induction variable and compare instruction used at the end.
+  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
+                           MachineInstr *&CmpInst) const {
+    return true;
+  }
+
+  /// Generate code to reduce the loop iteration by one and check if the loop
+  /// is finished.  Return the value/register of the new loop count.  We need
+  /// this function when peeling off one or more iterations of a loop. This
+  /// function assumes the nth iteration is peeled first.
+  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineInstr *IndVar,
+                                   MachineInstr &Cmp,
+                                   SmallVectorImpl<MachineOperand> &Cond,
+                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
+                                   unsigned Iter, unsigned MaxIter) const {
+    llvm_unreachable("Target didn't implement ReduceLoopCount");
+  }
+
+  /// Delete the instruction OldInst and everything after it, replacing it with
+  /// an unconditional branch to NewDest. This is used by the tail merging pass.
+  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                                       MachineBasicBlock *NewDest) const;
+
+  /// Return true if it's legal to split the given basic
+  /// block at the specified instruction (i.e. instruction would be the start
+  /// of a new basic block).
+  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MBBI) const {
+    return true;
+  }
+
+  /// Return true if it's profitable to predicate
+  /// instructions with accumulated instruction latency of "NumCycles"
+  /// of the specified basic block, where the probability of the instructions
+  /// being executed is given by Probability.
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+                                   unsigned ExtraPredCycles,
+                                   BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Second variant of isProfitableToIfCvt. This one
+  /// checks for the case where two basic blocks from true and false path
+  /// of an if-then-else (diamond) are predicated on mutually exclusive
+  /// predicates, where the probability of the true path being taken is given
+  /// by Probability.
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
+                                   unsigned ExtraTCycles,
+                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
+                                   unsigned ExtraFCycles,
+                                   BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Return true if it's profitable for the if-converter to duplicate
+  /// instructions of the specified accumulated latency in the specified MBB
+  /// to enable if-conversion.
+  /// The probability of the instructions being executed is given by
+  /// Probability.
+  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
+                                         unsigned NumCycles,
+                                         BranchProbability Probability) const {
+    return false;
+  }
+
+  /// Return true if it's profitable to unpredicate
+  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
+  /// exclusive predicates.
+  /// e.g.
+  ///   subeq  r0, r1, #1
+  ///   addne  r0, r1, #1
+  /// =>
+  ///   sub    r0, r1, #1
+  ///   addne  r0, r1, #1
+  ///
+  /// This may be profitable if conditional instructions are always executed.
+  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+                                         MachineBasicBlock &FMBB) const {
+    return false;
+  }
+
+  /// Return true if it is possible to insert a select
+  /// instruction that chooses between TrueReg and FalseReg based on the
+  /// condition code in Cond.
+  ///
+  /// When successful, also return the latency in cycles from TrueReg,
+  /// FalseReg, and Cond to the destination register. In most cases, a select
+  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1.
+  ///
+  /// Some x86 implementations have 2-cycle cmov instructions.
+  ///
+  /// @param MBB         Block where select instruction would be inserted.
+  /// @param Cond        Condition returned by AnalyzeBranch.
+  /// @param TrueReg     Virtual register to select when Cond is true.
+  /// @param FalseReg    Virtual register to select when Cond is false.
+  /// @param CondCycles  Latency from Cond+Branch to select output.
+  /// @param TrueCycles  Latency from TrueReg to select output.
+  /// @param FalseCycles Latency from FalseReg to select output.
+  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
+                               ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+                               unsigned FalseReg, int &CondCycles,
+                               int &TrueCycles, int &FalseCycles) const {
+    return false;
+  }
+
+  /// Insert a select instruction into MBB before I that will copy TrueReg to
+  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
+  ///
+  /// This function can only be called after canInsertSelect() returned true.
+  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
+  /// that the same flags or registers required by Cond are available at the
+  /// insertion point.
+  ///
+  /// @param MBB      Block where select instruction should be inserted.
+  /// @param I        Insertion point.
+  /// @param DL       Source location for debugging.
+  /// @param DstReg   Virtual register to be defined by select instruction.
+  /// @param Cond     Condition as computed by AnalyzeBranch.
+  /// @param TrueReg  Virtual register to copy when Cond is true.
+  /// @param FalseReg Virtual register to copy when Cond is false.
+  virtual void insertSelect(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator I, const DebugLoc &DL,
+                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
+                            unsigned TrueReg, unsigned FalseReg) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
+  }
+
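+  // A hedged sketch of the required protocol: query first, insert second.
+  // Cond comes from a prior successful analyzeBranch; the registers and the
+  // insertion point are assumed to be provided by the caller:
+  //
+  //   int CondCycles, TrueCycles, FalseCycles;
+  //   if (TII->canInsertSelect(MBB, Cond, TrueReg, FalseReg, CondCycles,
+  //                            TrueCycles, FalseCycles))
+  //     TII->insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);
+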
+  /// Analyze the given select instruction, returning true if
+  /// it cannot be understood. It is assumed that MI->isSelect() is true.
+  ///
+  /// When successful, return the controlling condition and the operands that
+  /// determine the true and false result values.
+  ///
+  ///   Result = SELECT Cond, TrueOp, FalseOp
+  ///
+  /// Some targets can optimize select instructions, for example by predicating
+  /// the instruction defining one of the operands. Such targets should set
+  /// Optimizable.
+  ///
+  /// @param         MI Select instruction to analyze.
+  /// @param Cond    Condition controlling the select.
+  /// @param TrueOp  Operand number of the value selected when Cond is true.
+  /// @param FalseOp Operand number of the value selected when Cond is false.
+  /// @param Optimizable Returned as true if MI is optimizable.
+  /// @returns False on success.
+  virtual bool analyzeSelect(const MachineInstr &MI,
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             unsigned &TrueOp, unsigned &FalseOp,
+                             bool &Optimizable) const {
+    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
+    return true;
+  }
+
+  /// Given a select instruction that was understood by
+  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
+  /// merging it with one of its operands. Returns NULL on failure.
+  ///
+  /// When successful, returns the new select instruction. The client is
+  /// responsible for deleting MI.
+  ///
+  /// If both sides of the select can be optimized, PreferFalse is used to pick
+  /// a side.
+  ///
+  /// @param MI          Optimizable select instruction.
+  /// @param NewMIs     Set that records all MIs in the basic block up to \p
+  /// MI. Has to be updated with any newly created or deleted MIs.
+  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
+  /// @returns Optimized instruction or NULL.
+  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
+                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
+                                       bool PreferFalse = false) const {
+    // This function must be implemented if Optimizable is ever set.
+    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
+  }
+
+  /// Emit instructions to copy a pair of physical registers.
+  ///
+  /// This function should support copies within any legal register class as
+  /// well as any cross-class copies created during instruction selection.
+  ///
+  /// The source and destination registers may overlap, which may require a
+  /// careful implementation when multiple copy instructions are required for
+  /// large registers. See for example the ARM target.
+  virtual void copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const {
+    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
+  }
+
+  /// Store the specified register of the given register class to the specified
+  /// stack frame index. The store instruction is to be added to the given
+  /// machine basic block before the specified machine instruction. If isKill
+  /// is true, the register operand is the last use and must be marked kill.
+  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MI,
+                                   unsigned SrcReg, bool isKill, int FrameIndex,
+                                   const TargetRegisterClass *RC,
+                                   const TargetRegisterInfo *TRI) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::storeRegToStackSlot!");
+  }
+
+  /// Load the specified register of the given register class from the specified
+  /// stack frame index. The load instruction is to be added to the given
+  /// machine basic block before the specified machine instruction.
+  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator MI,
+                                    unsigned DestReg, int FrameIndex,
+                                    const TargetRegisterClass *RC,
+                                    const TargetRegisterInfo *TRI) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::loadRegFromStackSlot!");
+  }
+
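+  // Illustrative only: a paired spill and reload through a frame index FI,
+  // with RC, TRI, Reg and the insertion points assumed from the calling pass:
+  //
+  //   TII->storeRegToStackSlot(MBB, SpillPt, Reg, /*isKill=*/true, FI, RC,
+  //                            TRI);
+  //   TII->loadRegFromStackSlot(MBB, RestorePt, Reg, FI, RC, TRI);
+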
+  /// This function is called for all pseudo instructions
+  /// that remain after register allocation. Many pseudo instructions are
+  /// created to help register allocation. This is the place to convert them
+  /// into real instructions. The target can edit MI in place, or it can insert
+  /// new instructions and erase MI. The function should return true if
+  /// anything was changed.
+  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
+
+  /// Check whether the target can fold a load that feeds a subreg operand
+  /// (or a subreg operand that feeds a store).
+  /// For example, X86 may want to return true if it can fold
+  /// movl (%esp), %eax
+  /// subb, %al, ...
+  /// Into:
+  /// subb (%esp), ...
+  ///
+  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
+  /// reject subregs - but since this behavior used to be enforced in the
+  /// target-independent code, moving this responsibility to the targets
+  /// has the potential of causing nasty silent breakage in out-of-tree targets.
+  virtual bool isSubregFoldable() const { return false; }
+
+  /// Attempt to fold a load or store of the specified stack
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned.
+  /// The new instruction is inserted before MI, and the client is responsible
+  /// for removing the old instruction.
+  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+                                  int FrameIndex,
+                                  LiveIntervals *LIS = nullptr) const;
+
+  /// Same as the previous version except it allows folding of any load and
+  /// store from / to any address, not just from a specific stack slot.
+  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+                                  MachineInstr &LoadMI,
+                                  LiveIntervals *LIS = nullptr) const;
+
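+  // Sketch of folding a reload into its use; treating operand 1 as the
+  // register operand to fold is an assumption, not a rule:
+  //
+  //   unsigned Ops[] = {1};
+  //   if (MachineInstr *Folded =
+  //           TII->foldMemoryOperand(UseMI, Ops, FrameIndex, LIS))
+  //     UseMI.eraseFromParent(); // Client removes the now-replaced old use.
+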
+  /// Return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in \p Root. All potential patterns are
+  /// returned in the \p Patterns vector. Patterns should be sorted in priority
+  /// order since the pattern evaluator stops checking as soon as it finds a
+  /// faster sequence.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param Patterns - Vector of possible combination patterns
+  virtual bool getMachineCombinerPatterns(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
+
+  /// Return true when a code sequence can improve throughput. It
+  /// should be called only for instructions in loops.
+  /// \param Pattern - combiner pattern
+  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
+
+  /// Return true if the input \P Inst is part of a chain of dependent ops
+  /// that are suitable for reassociation, otherwise return false.
+  /// If the instruction's operands must be commuted to have a previous
+  /// instruction of the same type define the first source operand, \P Commuted
+  /// will be set to true.
+  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
+
+  /// Return true when \P Inst is both associative and commutative.
+  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
+    return false;
+  }
+
+  /// Return true when \P Inst has reassociable operands in the same \P MBB.
+  virtual bool hasReassociableOperands(const MachineInstr &Inst,
+                                       const MachineBasicBlock *MBB) const;
+
+  /// Return true when \P Inst has reassociable sibling.
+  bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
+
+  /// When getMachineCombinerPatterns() finds patterns, this function generates
+  /// the instructions that could replace the original code sequence. The client
+  /// has to decide whether the actual replacement is beneficial or not.
+  /// \param Root - Instruction that could be combined with one of its operands
+  /// \param Pattern - Combination pattern for Root
+  /// \param InsInstrs - Vector of new instructions that implement the pattern
+  /// \param DelInstrs - Old instructions, including Root, that could be
+  /// replaced by InsInstrs
+  /// \param InstrIdxForVirtReg - map of virtual register to instruction in
+  /// InsInstrs that defines it
+  virtual void genAlternativeCodeSequence(
+      MachineInstr &Root, MachineCombinerPattern Pattern,
+      SmallVectorImpl<MachineInstr *> &InsInstrs,
+      SmallVectorImpl<MachineInstr *> &DelInstrs,
+      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
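+  // A minimal sketch of the combiner-style driver these hooks serve; cost
+  // evaluation and the commit/discard step are elided:
+  //
+  //   SmallVector<MachineCombinerPattern, 16> Patterns;
+  //   if (TII->getMachineCombinerPatterns(Root, Patterns))
+  //     for (MachineCombinerPattern P : Patterns) {
+  //       SmallVector<MachineInstr *, 16> InsInstrs, DelInstrs;
+  //       DenseMap<unsigned, unsigned> IdxForVirtReg;
+  //       TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
+  //                                       IdxForVirtReg);
+  //       // ...compare the old and new sequences, then keep one...
+  //     }
+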
+  /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
+  /// reduce critical path length.
+  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
+                      MachineCombinerPattern Pattern,
+                      SmallVectorImpl<MachineInstr *> &InsInstrs,
+                      SmallVectorImpl<MachineInstr *> &DelInstrs,
+                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
+  /// This is an architecture-specific helper function of reassociateOps.
+  /// Set special operand attributes for new instructions after reassociation.
+  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
+                                     MachineInstr &NewMI1,
+                                     MachineInstr &NewMI2) const {}
+
+  /// Return true when a target supports MachineCombiner.
+  virtual bool useMachineCombiner() const { return false; }
+
+  /// Return true if the given SDNode can be copied during scheduling
+  /// even if it has glue.
+  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
+
+  /// Remember what registers the specified instruction uses and modifies.
+  virtual void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
+                                BitVector &UsedRegs,
+                                const TargetRegisterInfo *TRI) const;
+
+protected:
+  /// Target-dependent implementation for foldMemoryOperand.
+  /// Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *
+  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+                        ArrayRef<unsigned> Ops,
+                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
+                        LiveIntervals *LIS = nullptr) const {
+    return nullptr;
+  }
+
+  /// Target-dependent implementation for foldMemoryOperand.
+  /// Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  /// The instruction and any auxiliary instructions necessary will be inserted
+  /// at InsertPt.
+  virtual MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+      LiveIntervals *LIS = nullptr) const {
+    return nullptr;
+  }
+
+  /// \brief Target-dependent implementation of getRegSequenceInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isRegSequenceLike().
+  ///
+  /// \see TargetInstrInfo::getRegSequenceInputs.
+  virtual bool getRegSequenceLikeInputs(
+      const MachineInstr &MI, unsigned DefIdx,
+      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
+    return false;
+  }
+
+  /// \brief Target-dependent implementation of getExtractSubregInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isExtractSubregLike().
+  ///
+  /// \see TargetInstrInfo::getExtractSubregInputs.
+  virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
+                                          unsigned DefIdx,
+                                          RegSubRegPairAndIdx &InputReg) const {
+    return false;
+  }
+
+  /// \brief Target-dependent implementation of getInsertSubregInputs.
+  ///
+  /// \returns true if it is possible to build the equivalent
+  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+  ///
+  /// \pre MI.isInsertSubregLike().
+  ///
+  /// \see TargetInstrInfo::getInsertSubregInputs.
+  virtual bool
+  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
+                            RegSubRegPair &BaseReg,
+                            RegSubRegPairAndIdx &InsertedReg) const {
+    return false;
+  }
+
+public:
+  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
+  /// (e.g. stack) the target returns the corresponding address space.
+  virtual unsigned
+  getAddressSpaceForPseudoSourceKind(PseudoSourceValue::PSVKind Kind) const {
+    return 0;
+  }
+
+  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
+  /// a store or a load and a store into two or more instructions. If this is
+  /// possible, returns true as well as the new instructions by reference.
+  virtual bool
+  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
+                      bool UnfoldLoad, bool UnfoldStore,
+                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
+    return false;
+  }
+
+  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                                   SmallVectorImpl<SDNode *> &NewNodes) const {
+    return false;
+  }
+
+  /// Returns the opcode of the would-be new
+  /// instruction after the load / store is unfolded from an instruction of the
+  /// specified opcode. It returns zero if the specified unfolding is not
+  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
+  /// index of the operand which will hold the register holding the loaded
+  /// value.
+  virtual unsigned
+  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
+                             unsigned *LoadRegIndex = nullptr) const {
+    return 0;
+  }
+
+  /// This is used by the pre-regalloc scheduler to determine if two loads are
+  /// loading from the same base address. It should only return true if the base
+  /// pointers are the same and the only difference between the two addresses
+  /// is the offset. It also returns the offsets by reference.
+  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+                                       int64_t &Offset1,
+                                       int64_t &Offset2) const {
+    return false;
+  }
+
+  /// This is used by the pre-regalloc scheduler to determine (in conjunction
+  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
+  /// On some targets if two loads are loading from
+  /// addresses in the same cache line, it's better if they are scheduled
+  /// together. This function takes two integers that represent the load offsets
+  /// from the common base address. It returns true if it decides it's desirable
+  /// to schedule the two loads together. "NumLoads" is the number of loads that
+  /// have already been scheduled after Load1.
+  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                       int64_t Offset1, int64_t Offset2,
+                                       unsigned NumLoads) const {
+    return false;
+  }
+
+  /// Get the base register and byte offset of an instruction that reads/writes
+  /// memory.
+  virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
+                                     int64_t &Offset,
+                                     const TargetRegisterInfo *TRI) const {
+    return false;
+  }
+
+  /// Return true if the instruction contains a base register and offset. If
+  /// true, the function also sets the operand position in the instruction
+  /// for the base register and offset.
+  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
+                                        unsigned &BasePos,
+                                        unsigned &OffsetPos) const {
+    return false;
+  }
+
+  /// If the instruction is an increment of a constant value, return true and
+  /// set Value to the amount.
+  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
+    return false;
+  }
+
+  /// Returns true if the two given memory operations should be scheduled
+  /// adjacent. Note that you have to add:
+  ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+  /// or
+  ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
+  /// to TargetPassConfig::createMachineScheduler() to have an effect.
+  virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
+                                   MachineInstr &SecondLdSt, unsigned BaseReg2,
+                                   unsigned NumLoads) const {
+    llvm_unreachable("target did not implement shouldClusterMemOps()");
+  }
+
+  /// Reverses the branch condition of the specified condition list,
+  /// returning false on success and true if it cannot be reversed.
+  virtual bool
+  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+    return true;
+  }
+
+  /// Insert a noop into the instruction stream at the specified point.
+  virtual void insertNoop(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator MI) const;
+
+  /// Return the target's noop instruction in NopInst.
+  virtual void getNoop(MCInst &NopInst) const;
+
+  /// Return true for post-incremented instructions.
+  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
+
+  /// Returns true if the instruction is already predicated.
+  virtual bool isPredicated(const MachineInstr &MI) const { return false; }
+
+  /// Returns true if the instruction is a
+  /// terminator instruction that has not been predicated.
+  virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
+
+  /// Returns true if MI is an unconditional tail call.
+  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
+    return false;
+  }
+
+  /// Returns true if the tail call can be made conditional on BranchCond.
+  virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
+                                          const MachineInstr &TailCall) const {
+    return false;
+  }
+
+  /// Replace the conditional branch in MBB with a conditional tail call.
+  virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
+                                         SmallVectorImpl<MachineOperand> &Cond,
+                                         const MachineInstr &TailCall) const {
+    llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
+  }
+
+  /// Convert the instruction into a predicated instruction.
+  /// It returns true if the operation was successful.
+  virtual bool PredicateInstruction(MachineInstr &MI,
+                                    ArrayRef<MachineOperand> Pred) const;
+
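+  // If-conversion sketch, purely illustrative; Pred is assumed to come from
+  // the target's branch analysis:
+  //
+  //   if (!TII->isPredicated(MI) && TII->isPredicable(MI) &&
+  //       TII->PredicateInstruction(MI, Pred)) {
+  //     // MI now executes only when Pred holds.
+  //   }
+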
+  /// Returns true if the first specified predicate
+  /// subsumes the second, e.g. GE subsumes GT.
+  virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+                                 ArrayRef<MachineOperand> Pred2) const {
+    return false;
+  }
+
+  /// If the specified instruction defines any predicate
+  /// or condition code register(s) used for predication, returns true as well
+  /// as the definition predicate(s) by reference.
+  virtual bool DefinesPredicate(MachineInstr &MI,
+                                std::vector<MachineOperand> &Pred) const {
+    return false;
+  }
+
+  /// Return true if the specified instruction can be predicated.
+  /// By default, this returns true for every instruction with a
+  /// PredicateOperand.
+  virtual bool isPredicable(const MachineInstr &MI) const {
+    return MI.getDesc().isPredicable();
+  }
+
+  /// Return true if it's safe to move a machine
+  /// instruction that defines the specified register class.
+  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+    return true;
+  }
+
+  /// Test if the given instruction should be considered a scheduling boundary.
+  /// This primarily includes labels and terminators.
+  virtual bool isSchedulingBoundary(const MachineInstr &MI,
+                                    const MachineBasicBlock *MBB,
+                                    const MachineFunction &MF) const;
+
+  /// Measure the specified inline asm to determine an approximation of its
+  /// length.
+  virtual unsigned getInlineAsmLength(const char *Str,
+                                      const MCAsmInfo &MAI) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions before register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+                               const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions before register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetMIHazardRecognizer(const InstrItineraryData *,
+                                 const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer to use for this target when
+  /// scheduling the machine instructions after register allocation.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
+                                     const ScheduleDAG *DAG) const;
+
+  /// Allocate and return a hazard recognizer to use by non-scheduling
+  /// passes.
+  virtual ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
+    return nullptr;
+  }
+
+  /// Provide a global flag for disabling the PreRA hazard recognizer that
+  /// targets may choose to honor.
+  bool usePreRAHazardRecognizer() const;
+
+  /// For a comparison instruction, return the source registers
+  /// in SrcReg and SrcReg2 if it has two register operands, and the mask and
+  /// value it compares against in Mask and Value. Return true if the
+  /// comparison instruction can be analyzed.
+  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+                              unsigned &SrcReg2, int &Mask, int &Value) const {
+    return false;
+  }
+
+  /// See if the comparison instruction can be converted
+  /// into something more efficient. E.g., on ARM most instructions can set the
+  /// flags register, obviating the need for a separate CMP.
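+  ///
+  /// As an illustrative (not normative) ARM-style example, a flag-setting
+  /// subtract makes the explicit compare redundant:
+  /// \code
+  ///   subs r0, r1, r2   ; sets condition flags
+  ///   cmp  r0, #0       ; redundant; candidate for removal by this hook
+  ///   beq  .LBB0_2
+  /// \endcode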
+  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+                                    unsigned SrcReg2, int Mask, int Value,
+                                    const MachineRegisterInfo *MRI) const {
+    return false;
+  }
+  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
+
+  /// Try to remove the load by folding it to a register operand at the use.
+  /// We fold the load instruction if and only if the
+  /// def and use are in the same BB. We only look at one load and see
+  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+  /// defined by the load we are trying to fold. DefMI returns the machine
+  /// instruction that defines FoldAsLoadDefReg, and the function returns
+  /// the machine instruction generated due to folding.
+  virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
+                                          const MachineRegisterInfo *MRI,
+                                          unsigned &FoldAsLoadDefReg,
+                                          MachineInstr *&DefMI) const {
+    return nullptr;
+  }
+
+  /// 'Reg' is known to be defined by a move-immediate instruction;
+  /// try to fold the immediate into the use instruction.
+  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
+  /// then the caller may assume that DefMI has been erased from its parent
+  /// block. The caller may assume that it will not be erased by this
+  /// function otherwise.
+  virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
+                             unsigned Reg, MachineRegisterInfo *MRI) const {
+    return false;
+  }
+
+  /// Return the number of micro-operations the given machine
+  /// instruction will be decoded into on the target CPU. The itinerary's
+  /// IssueWidth is the number of microops that can be dispatched each
+  /// cycle. An instruction with zero microops takes no dispatch resources.
+  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+                                  const MachineInstr &MI) const;
+
+  /// Return true for pseudo instructions that don't consume any
+  /// machine resources in their current form. These are common cases that the
+  /// scheduler should consider free, rather than conservatively handling them
+  /// as instructions with no itinerary.
+  bool isZeroCost(unsigned Opcode) const {
+    return Opcode <= TargetOpcode::COPY;
+  }
+
+  virtual int getOperandLatency(const InstrItineraryData *ItinData,
+                                SDNode *DefNode, unsigned DefIdx,
+                                SDNode *UseNode, unsigned UseIdx) const;
+
+  /// Compute and return the use operand latency of a given pair of def and
+  /// use. In most cases, the static scheduling itinerary is enough to
+  /// determine the operand latency, but it may not be possible for
+  /// instructions with a variable number of defs / uses.
+  ///
+  /// This is a raw interface to the itinerary that may be directly overridden
+  /// by a target. Use computeOperandLatency to get the best estimate of
+  /// latency.
+  virtual int getOperandLatency(const InstrItineraryData *ItinData,
+                                const MachineInstr &DefMI, unsigned DefIdx,
+                                const MachineInstr &UseMI,
+                                unsigned UseIdx) const;
+
+  /// Compute the instruction latency of a given instruction.
+  /// If the instruction has higher cost when predicated, it's returned via
+  /// PredCost.
+  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
+                                   const MachineInstr &MI,
+                                   unsigned *PredCost = nullptr) const;
+
+  virtual unsigned getPredicationCost(const MachineInstr &MI) const;
+
+  virtual int getInstrLatency(const InstrItineraryData *ItinData,
+                              SDNode *Node) const;
+
+  /// Return the default expected latency for a def based on its opcode.
+  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
+                             const MachineInstr &DefMI) const;
+
+  int computeDefOperandLatency(const InstrItineraryData *ItinData,
+                               const MachineInstr &DefMI) const;
+
+  /// Return true if this opcode has high latency to its result.
+  virtual bool isHighLatencyDef(int opc) const { return false; }
+
+  /// Compute operand latency between a def of 'Reg'
+  /// and a use in the current loop. Return true if the target considered
+  /// it 'high'. This is used by optimization passes such as machine LICM to
+  /// determine whether it makes sense to hoist an instruction out even in a
+  /// high register pressure situation.
+  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
+                                     const MachineRegisterInfo *MRI,
+                                     const MachineInstr &DefMI, unsigned DefIdx,
+                                     const MachineInstr &UseMI,
+                                     unsigned UseIdx) const {
+    return false;
+  }
+
+  /// Compute operand latency of a def of 'Reg'. Return true
+  /// if the target considered it 'low'.
+  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+                                const MachineInstr &DefMI,
+                                unsigned DefIdx) const;
+
+  /// Perform target-specific instruction verification.
+  virtual bool verifyInstruction(const MachineInstr &MI,
+                                 StringRef &ErrInfo) const {
+    return true;
+  }
+
+  /// Return the current execution domain and bit mask of
+  /// possible domains for instruction.
+  ///
+  /// Some micro-architectures have multiple execution domains, and multiple
+  /// opcodes that perform the same operation in different domains.  For
+  /// example, the x86 architecture provides the por, orps, and orpd
+  /// instructions that all do the same thing.  There is a latency penalty if a
+  /// register is written in one domain and read in another.
+  ///
+  /// This function returns a pair (domain, mask) containing the execution
+  /// domain of MI, and a bit mask of possible domains.  The setExecutionDomain
+  /// function can be used to change the opcode to one of the domains in the
+  /// bit mask.  Instructions whose execution domain can't be changed should
+  /// return a 0 mask.
+  ///
+  /// The execution domain numbers don't have any special meaning except domain
+  /// 0 is used for instructions that are not associated with any interesting
+  /// execution domain.
+  ///
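+  /// As a rough usage sketch (the exact driver is target- and pass-specific;
+  /// WantedDomain is a hypothetical value chosen from the returned mask):
+  /// \code
+  ///   std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
+  ///   if (DomP.second & (1 << WantedDomain))  // WantedDomain: hypothetical
+  ///     TII->setExecutionDomain(MI, WantedDomain);
+  /// \endcode
+  ///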
+  virtual std::pair<uint16_t, uint16_t>
+  getExecutionDomain(const MachineInstr &MI) const {
+    return std::make_pair(0, 0);
+  }
+
+  /// Change the opcode of MI to execute in Domain.
+  ///
+  /// The bit (1 << Domain) must be set in the mask returned from
+  /// getExecutionDomain(MI).
+  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
+
+  /// Returns the preferred minimum clearance
+  /// before an instruction with an unwanted partial register update.
+  ///
+  /// Some instructions only write part of a register, and implicitly need to
+  /// read the other parts of the register.  This may cause unwanted stalls
+  /// preventing otherwise unrelated instructions from executing in parallel in
+  /// an out-of-order CPU.
+  ///
+  /// For example, the x86 instruction cvtsi2ss writes its result to bits
+  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
+  /// the instruction needs to wait for the old value of the register to become
+  /// available:
+  ///
+  ///   addps %xmm1, %xmm0
+  ///   movaps %xmm0, (%rax)
+  ///   cvtsi2ss %rbx, %xmm0
+  ///
+  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
+  /// instruction before it can issue, even though the high bits of %xmm0
+  /// probably aren't needed.
+  ///
+  /// This hook returns the preferred clearance before MI, measured in
+  /// instructions.  Other defs of MI's operand OpNum are avoided in the last N
+  /// instructions before MI.  It should only return a positive value for
+  /// unwanted dependencies.  If the old bits of the defined register have
+  /// useful values, or if MI is determined to otherwise read the dependency,
+  /// the hook should return 0.
+  ///
+  /// The unwanted dependency may be handled by:
+  ///
+  /// 1. Allocating the same register for an MI def and use.  That makes the
+  ///    unwanted dependency identical to a required dependency.
+  ///
+  /// 2. Allocating a register for the def that has no defs in the previous N
+  ///    instructions.
+  ///
+  /// 3. Calling breakPartialRegDependency() with the same arguments.  This
+  ///    allows the target to insert a dependency breaking instruction.
+  ///
+  virtual unsigned
+  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
+                               const TargetRegisterInfo *TRI) const {
+    // The default implementation returns 0 for no partial register dependency.
+    return 0;
+  }
+
+  /// \brief Return the minimum clearance before an instruction that reads an
+  /// unused register.
+  ///
+  /// For example, AVX instructions may copy part of a register operand into
+  /// the unused high bits of the destination register.
+  ///
+  /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
+  ///
+  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
+  /// false dependence on any previous write to %xmm0.
+  ///
+  /// This hook works similarly to getPartialRegUpdateClearance, except that it
+  /// does not take an operand index. Instead, it sets \p OpNum to the index
+  /// of the unused register.
+  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
+                                        const TargetRegisterInfo *TRI) const {
+    // The default implementation returns 0 for no undef register dependency.
+    return 0;
+  }
+
+  /// Insert a dependency-breaking instruction
+  /// before MI to eliminate an unwanted dependency on OpNum.
+  ///
+  /// If it wasn't possible to avoid a def in the last N instructions before MI
+  /// (see getPartialRegUpdateClearance), this hook will be called to break the
+  /// unwanted dependency.
+  ///
+  /// On x86, an xorps instruction can be used as a dependency breaker:
+  ///
+  ///   addps %xmm1, %xmm0
+  ///   movaps %xmm0, (%rax)
+  ///   xorps %xmm0, %xmm0
+  ///   cvtsi2ss %rbx, %xmm0
+  ///
+  /// An <imp-kill> operand should be added to MI if an instruction was
+  /// inserted.  This ties the instructions together in the post-ra scheduler.
+  ///
+  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
+                                         const TargetRegisterInfo *TRI) const {}
+
+  /// Create machine specific model for scheduling.
+  virtual DFAPacketizer *
+  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
+    return nullptr;
+  }
+
+  /// Sometimes, it is possible for the target
+  /// to tell, even without aliasing information, that two MIs access different
+  /// memory addresses. This function returns true if the two MIs are known to
+  /// access different memory addresses, and false otherwise.
+  ///
+  /// Assumes any physical registers used to compute addresses have the same
+  /// value for both instructions. (This is the most useful assumption for
+  /// post-RA scheduling.)
+  ///
+  /// See also MachineInstr::mayAlias, which is implemented on top of this
+  /// function.
+  virtual bool
+  areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+                                  AliasAnalysis *AA = nullptr) const {
+    assert((MIa.mayLoad() || MIa.mayStore()) &&
+           "MIa must load from or modify a memory location");
+    assert((MIb.mayLoad() || MIb.mayStore()) &&
+           "MIb must load from or modify a memory location");
+    return false;
+  }
+
+  /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
+  /// which is a heuristic used for CSE'ing phys reg defs.
+  virtual unsigned getMachineCSELookAheadLimit() const {
+    // The default lookahead is small to prevent unprofitable quadratic
+    // behavior.
+    return 5;
+  }
+
+  /// Return an array that contains the ids of the target indices (used for the
+  /// TargetIndex machine operand) and their names.
+  ///
+  /// MIR Serialization is able to serialize only the target indices that are
+  /// defined by this method.
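+  ///
+  /// A hypothetical out-of-tree override might look like:
+  /// \code
+  ///   static const std::pair<int, const char *> Indices[] = {
+  ///       {0, "ti-example"}};        // index/name are illustrative only
+  ///   return makeArrayRef(Indices);
+  /// \endcode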
+  virtual ArrayRef<std::pair<int, const char *>>
+  getSerializableTargetIndices() const {
+    return None;
+  }
+
+  /// Decompose the machine operand's target flags into two values - the direct
+  /// target flag value and any of bit flags that are applied.
+  virtual std::pair<unsigned, unsigned>
+  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
+    return std::make_pair(0u, 0u);
+  }
+
+  /// Return an array that contains the direct target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableDirectMachineOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Return an array that contains the bitmask target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableBitmaskMachineOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Return an array that contains the MMO target flag values and their
+  /// names.
+  ///
+  /// MIR Serialization is able to serialize only the MMO target flags that are
+  /// defined by this method.
+  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+  getSerializableMachineMemOperandTargetFlags() const {
+    return None;
+  }
+
+  /// Determines whether \p Inst is a tail call instruction. Override this
+  /// method on targets that do not properly set MCID::Return and MCID::Call on
+  /// tail call instructions.
+  virtual bool isTailCall(const MachineInstr &Inst) const {
+    return Inst.isReturn() && Inst.isCall();
+  }
+
+  /// True if the instruction is bound to the top of its basic block and no
+  /// other instructions shall be inserted before it. This can be implemented
+  /// to prevent the register allocator from inserting spills before such
+  /// instructions.
+  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
+    return false;
+  }
+
+  /// \brief Describes the number of instructions that it will take to call and
+  /// construct a frame for a given outlining candidate.
+  struct MachineOutlinerInfo {
+    /// Number of instructions to call an outlined function for this candidate.
+    unsigned CallOverhead;
+
+    /// \brief Number of instructions to construct an outlined function frame
+    /// for this candidate.
+    unsigned FrameOverhead;
+
+    /// \brief Represents the specific instructions that must be emitted to
+    /// construct a call to this candidate.
+    unsigned CallConstructionID;
+
+    /// \brief Represents the specific instructions that must be emitted to
+    /// construct a frame for this candidate's outlined function.
+    unsigned FrameConstructionID;
+
+    MachineOutlinerInfo() {}
+    MachineOutlinerInfo(unsigned CallOverhead, unsigned FrameOverhead,
+                        unsigned CallConstructionID,
+                        unsigned FrameConstructionID)
+        : CallOverhead(CallOverhead), FrameOverhead(FrameOverhead),
+          CallConstructionID(CallConstructionID),
+          FrameConstructionID(FrameConstructionID) {}
+  };
+
+  /// \brief Returns a \p MachineOutlinerInfo struct containing target-specific
+  /// information for a set of outlining candidates.
+  virtual MachineOutlinerInfo getOutlininingCandidateInfo(
+      std::vector<
+          std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
+          &RepeatedSequenceLocs) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::getOutlininingCandidateInfo!");
+  }
+
+  /// Represents how an instruction should be mapped by the outliner.
+  /// \p Legal instructions are those which are safe to outline.
+  /// \p Illegal instructions are those which cannot be outlined.
+  /// \p Invisible instructions are instructions which can be outlined, but
+  /// shouldn't actually impact the outlining result.
+  enum MachineOutlinerInstrType { Legal, Illegal, Invisible };
+
+  /// Returns how or if \p MI should be outlined.
+  virtual MachineOutlinerInstrType
+  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::getOutliningType!");
+  }
+
+  /// \brief Returns target-defined flags defining properties of the MBB for
+  /// the outliner.
+  virtual unsigned getMachineOutlinerMBBFlags(MachineBasicBlock &MBB) const {
+    return 0x0;
+  }
+
+  /// Insert a custom epilogue for outlined functions.
+  /// This may be empty, in which case no epilogue or return statement will be
+  /// emitted.
+  virtual void insertOutlinerEpilogue(MachineBasicBlock &MBB,
+                                      MachineFunction &MF,
+                                      const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinerEpilogue!");
+  }
+
+  /// Insert a call to an outlined function into the program.
+  /// Returns an iterator to the spot where we inserted the call. This must be
+  /// implemented by the target.
+  virtual MachineBasicBlock::iterator
+  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
+                     MachineBasicBlock::iterator &It, MachineFunction &MF,
+                     const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
+  }
+
+  /// Insert a custom prologue for outlined functions.
+  /// This may be empty, in which case no prologue will be emitted.
+  virtual void insertOutlinerPrologue(MachineBasicBlock &MBB,
+                                      MachineFunction &MF,
+                                      const MachineOutlinerInfo &MInfo) const {
+    llvm_unreachable(
+        "Target didn't implement TargetInstrInfo::insertOutlinerPrologue!");
+  }
+
+  /// Return true if the function can safely be outlined from.
+  /// A function \p MF is considered safe for outlining if an outlined function
+  /// produced from its instructions yields a program that produces the same
+  /// output for any set of given inputs.
+  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
+                                           bool OutlineFromLinkOnceODRs) const {
+    llvm_unreachable("Target didn't implement "
+                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
+  }
+
+private:
+  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
+  unsigned CatchRetOpcode;
+  unsigned ReturnOpcode;
+};
+
+/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
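+///
+/// This enables RegSubRegPair keys in DenseMap, for example:
+/// \code
+///   DenseMap<TargetInstrInfo::RegSubRegPair, unsigned> PairIndex;
+///   PairIndex[TargetInstrInfo::RegSubRegPair(Reg, SubReg)] = 0;
+///   // Reg/SubReg above are placeholder operands, not specific values.
+/// \endcode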
+template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
+  using RegInfo = DenseMapInfo<unsigned>;
+
+  static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
+    return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
+                                          RegInfo::getEmptyKey());
+  }
+
+  static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
+    return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
+                                          RegInfo::getTombstoneKey());
+  }
+
+  /// \brief Reuse getHashValue implementation from
+  /// std::pair<unsigned, unsigned>.
+  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
+    std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
+    return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
+  }
+
+  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
+                      const TargetInstrInfo::RegSubRegPair &RHS) {
+    return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
+           RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TARGET_TARGETINSTRINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
new file mode 100644
index 0000000..483223a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLowering.h
@@ -0,0 +1,3615 @@
+//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM code to machine code.  This has three
+/// main components:
+///
+///  1. Which ValueTypes are natively supported by the target.
+///  2. Which operations are supported for supported ValueTypes.
+///  3. Cost thresholds for alternative implementations of certain operations.
+///
+/// In addition it has a few other components, like information about FP
+/// immediates.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERING_H
+#define LLVM_CODEGEN_TARGETLOWERING_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BranchProbability;
+class CCState;
+class CCValAssign;
+class Constant;
+class FastISel;
+class FunctionLoweringInfo;
+class GlobalValue;
+class IntrinsicInst;
+struct KnownBits;
+class LLVMContext;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineJumpTableInfo;
+class MachineLoop;
+class MachineRegisterInfo;
+class MCContext;
+class MCExpr;
+class Module;
+class TargetRegisterClass;
+class TargetLibraryInfo;
+class TargetRegisterInfo;
+class Value;
+
+namespace Sched {
+
+  enum Preference {
+    None,             // No preference
+    Source,           // Follow source order.
+    RegPressure,      // Scheduling for lowest register pressure.
+    Hybrid,           // Scheduling for both latency and register pressure.
+    ILP,              // Scheduling for ILP in low register pressure mode.
+    VLIW              // Scheduling for VLIW targets.
+  };
+
+} // end namespace Sched
+
+/// This base class for TargetLowering contains the SelectionDAG-independent
+/// parts that can be used from the rest of CodeGen.
+class TargetLoweringBase {
+public:
+  /// This enum indicates whether operations are valid for a target, and if not,
+  /// what action should be used to make them valid.
+  enum LegalizeAction : uint8_t {
+    Legal,      // The target natively supports this operation.
+    Promote,    // This operation should be executed in a larger type.
+    Expand,     // Try to expand this to other ops, otherwise use a libcall.
+    LibCall,    // Don't try to expand this to other ops, always use a libcall.
+    Custom      // Use the LowerOperation hook to implement custom lowering.
+  };
+
+  /// This enum indicates whether types are legal for a target, and if not,
+  /// what action should be used to make them valid.
+  enum LegalizeTypeAction : uint8_t {
+    TypeLegal,           // The target natively supports this type.
+    TypePromoteInteger,  // Replace this integer with a larger one.
+    TypeExpandInteger,   // Split this integer into two of half the size.
+    TypeSoftenFloat,     // Convert this float to a same-size integer type
+                         // if an operation is not supported in target HW.
+    TypeExpandFloat,     // Split this float into two of half the size.
+    TypeScalarizeVector, // Replace this one-element vector with its element.
+    TypeSplitVector,     // Split this vector into two of half the size.
+    TypeWidenVector,     // This vector should be widened into a larger vector.
+    TypePromoteFloat     // Replace this float with a larger one.
+  };
+
+  /// LegalizeKind holds the legalization kind that needs to happen to EVT
+  /// in order to type-legalize it.
+  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
+
+  /// Enum that describes how the target represents true/false values.
+  enum BooleanContent {
+    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
+    ZeroOrOneBooleanContent,    // All bits zero except for bit 0.
+    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
+  };
+
+  /// Enum that describes what type of support for selects the target has.
+  enum SelectSupportKind {
+    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
+    ScalarCondVectorVal,  // The target supports selects with a scalar condition
+                          // and vector values (ex: cmov).
+    VectorMaskSelect      // The target supports vector selects with a vector
+                          // mask (ex: x86 blends).
+  };
+
+  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
+  /// to, if at all. Exists because different targets have different levels of
+  /// support for these atomic instructions, and also have different options
+  /// w.r.t. what they should expand to.
+  enum class AtomicExpansionKind {
+    None,    // Don't expand the instruction.
+    LLSC,    // Expand the instruction into loadlinked/storeconditional; used
+             // by ARM/AArch64.
+    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
+             // greater atomic guarantees than a normal load.
+    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+  };
+
+  /// Enum that specifies when a multiplication should be expanded.
+  enum class MulExpansionKind {
+    Always,            // Always expand the instruction.
+    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
+                       // or custom.
+  };
+
+  class ArgListEntry {
+  public:
+    Value *Val = nullptr;
+    SDValue Node = SDValue();
+    Type *Ty = nullptr;
+    bool IsSExt : 1;
+    bool IsZExt : 1;
+    bool IsInReg : 1;
+    bool IsSRet : 1;
+    bool IsNest : 1;
+    bool IsByVal : 1;
+    bool IsInAlloca : 1;
+    bool IsReturned : 1;
+    bool IsSwiftSelf : 1;
+    bool IsSwiftError : 1;
+    uint16_t Alignment = 0;
+
+    ArgListEntry()
+        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
+          IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false),
+          IsSwiftSelf(false), IsSwiftError(false) {}
+
+    void setAttributes(ImmutableCallSite *CS, unsigned ArgIdx);
+  };
+  using ArgListTy = std::vector<ArgListEntry>;
+
+  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
+                                     ArgListTy &Args) const {}
+
+  static ISD::NodeType getExtendForContent(BooleanContent Content) {
+    switch (Content) {
+    case UndefinedBooleanContent:
+      // Extend by adding rubbish bits.
+      return ISD::ANY_EXTEND;
+    case ZeroOrOneBooleanContent:
+      // Extend by adding zero bits.
+      return ISD::ZERO_EXTEND;
+    case ZeroOrNegativeOneBooleanContent:
+      // Extend by copying the sign bit.
+      return ISD::SIGN_EXTEND;
+    }
+    llvm_unreachable("Invalid content kind");
+  }
+
+  /// NOTE: The TargetMachine owns TLOF.
+  explicit TargetLoweringBase(const TargetMachine &TM);
+  TargetLoweringBase(const TargetLoweringBase &) = delete;
+  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
+  virtual ~TargetLoweringBase() = default;
+
+protected:
+  /// \brief Initialize all of the actions to default values.
+  void initActions();
+
+public:
+  const TargetMachine &getTargetMachine() const { return TM; }
+
+  virtual bool useSoftFloat() const { return false; }
+
+  /// Return the pointer type for the given address space, defaults to
+  /// the pointer type from the data layout.
+  /// FIXME: The default needs to be removed once all the code is updated.
+  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
+    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
+  }
+
+  /// Return the type for frame index, which is determined by
+  /// the alloca address space specified through the data layout.
+  MVT getFrameIndexTy(const DataLayout &DL) const {
+    return getPointerTy(DL, DL.getAllocaAddrSpace());
+  }
+
+  /// Return the type for operands of fence.
+  /// TODO: Let fence operands be of i32 type and remove this.
+  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
+    return getPointerTy(DL);
+  }
+
+  /// EVT is not used in-tree, but is used by out-of-tree targets.
+  /// Documentation for this function would be nice...
+  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
+
+  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
+                       bool LegalTypes = true) const;
+
+  /// Returns the type to be used for the index operand of:
+  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
+  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
+  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
+    return getPointerTy(DL);
+  }
+
+  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
+    return true;
+  }
+
+  /// Return true if multiple condition registers are available.
+  bool hasMultipleConditionRegisters() const {
+    return HasMultipleConditionRegisters;
+  }
+
+  /// Return true if the target has BitExtract instructions.
+  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
+
+  /// Return the preferred vector type legalization action.
+  virtual TargetLoweringBase::LegalizeTypeAction
+  getPreferredVectorAction(EVT VT) const {
+    // The default action for one element vectors is to scalarize
+    if (VT.getVectorNumElements() == 1)
+      return TypeScalarizeVector;
+    // The default action for other vectors is to promote
+    return TypePromoteInteger;
+  }
+
+  // There are two general methods for expanding a BUILD_VECTOR node:
+  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
+  //     them together.
+  //  2. Build the vector on the stack and then load it.
+  // If this function returns true, then method (1) will be used, subject to
+  // the constraint that all of the necessary shuffles are legal (as determined
+  // by isShuffleMaskLegal). If this function returns false, then method (2) is
+  // always used. The vector type, and the number of defined values, are
+  // provided.
+  virtual bool
+  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
+                                      unsigned DefinedValues) const {
+    return DefinedValues < 3;
+  }
+
+  /// Return true if integer divide is usually cheaper than a sequence of
+  /// several shifts, adds, and multiplies for this target.
+  /// The definition of "cheaper" may depend on whether we're optimizing
+  /// for speed or for size.
+  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
+
+  /// Return true if the target can handle a standalone remainder operation.
+  virtual bool hasStandaloneRem(EVT VT) const {
+    return true;
+  }
+
+  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
+  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
+    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
+    return false;
+  }
+
+  /// Reciprocal estimate status values used by the functions below.
+  enum ReciprocalEstimate : int {
+    Unspecified = -1,
+    Disabled = 0,
+    Enabled = 1
+  };
+
+  /// Return a ReciprocalEstimate enum value for a square root of the given type
+  /// based on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
+
+  /// Return a ReciprocalEstimate enum value for a division of the given type
+  /// based on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
+
+  /// Return the refinement step count for a square root of the given type based
+  /// on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
+
+  /// Return the refinement step count for a division of the given type based
+  /// on the function's attributes. If the operation is not overridden by
+  /// the function's attributes, "Unspecified" is returned and target defaults
+  /// are expected to be used for instruction selection.
+  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
+
+  /// Returns true if target has indicated at least one type should be bypassed.
+  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+  /// Returns a map of slow types for division or remainder with corresponding
+  /// fast types.
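+  /// For example, a target where 64-bit division is slow might map 64 to 32,
+  /// so that a 64-bit divide can be bypassed with a 32-bit one when the
+  /// operands are known to fit (this particular pairing is illustrative).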
+  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+    return BypassSlowDivWidths;
+  }
+
+  /// Return true if Flow Control is an expensive operation that should be
+  /// avoided.
+  bool isJumpExpensive() const { return JumpIsExpensive; }
+
+  /// Return true if selects are only cheaper than branches if the branch is
+  /// unlikely to be predicted right.
+  bool isPredictableSelectExpensive() const {
+    return PredictableSelectIsExpensive;
+  }
+
+  /// If a branch or a select condition is skewed in one direction by more than
+  /// this factor, it is very likely to be predicted correctly.
+  virtual BranchProbability getPredictableBranchThreshold() const;
+
+  /// Return true if the following transform is beneficial:
+  /// fold (conv (load x)) -> (load (conv*)x)
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading is more efficient; however, this can be undone by optimizations
+  /// in the DAG combiner.
+  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
+                                       EVT BitcastVT) const {
+    // Don't do if we could do an indexed load on the original type, but not on
+    // the new one.
+    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
+      return true;
+
+    MVT LoadMVT = LoadVT.getSimpleVT();
+
+    // Don't bother doing this if it's just going to be promoted again later, as
+    // doing so might interfere with other combines.
+    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
+        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
+      return false;
+
+    return true;
+  }
+
+  /// Return true if the following transform is beneficial:
+  /// (store (conv x), ptr) -> (store x, (conv*)ptr)
+  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
+    // Default to the same logic as loads.
+    return isLoadBitCastBeneficial(StoreVT, BitcastVT);
+  }
+
+  /// Return true if it is expected to be cheaper to do a store of a non-zero
+  /// vector constant with the given size and type for the address space than to
+  /// store the individual scalar element constants.
+  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
+                                            unsigned NumElem,
+                                            unsigned AddrSpace) const {
+    return false;
+  }
+
+  /// Allow store merging after legalization in addition to before legalization.
+  /// This may catch stores that do not exist earlier (e.g., stores created
+  /// from intrinsics).
+  virtual bool mergeStoresAfterLegalization() const { return true; }
+
+  /// Returns true if it's reasonable to merge stores to MemVT size.
+  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
+                                const SelectionDAG &DAG) const {
+    return true;
+  }
+
+  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
+  virtual bool isCheapToSpeculateCttz() const {
+    return false;
+  }
+
+  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
+  virtual bool isCheapToSpeculateCtlz() const {
+    return false;
+  }
+
+  /// \brief Return true if ctlz instruction is fast.
+  virtual bool isCtlzFast() const {
+    return false;
+  }
+
+  /// Return true if it is safe to transform an integer-domain bitwise operation
+  /// into the equivalent floating-point operation. This should be set to true
+  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
+  /// type.
+  virtual bool hasBitPreservingFPLogic(EVT VT) const {
+    return false;
+  }
+
+  /// \brief Return true if it is cheaper to split the store of a merged
+  /// integer value from a pair of smaller values into multiple stores.
+  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
+    return false;
+  }
+
+  /// \brief Return true if the target supports combining a
+  /// chain like:
+  /// \code
+  ///   %andResult = and %val1, #mask
+  ///   %icmpResult = icmp %andResult, 0
+  /// \endcode
+  /// into a single machine instruction of a form like:
+  /// \code
+  ///   cc = test %register, #mask
+  /// \endcode
+  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
+    return false;
+  }
+
+  /// Use bitwise logic to make pairs of compares more efficient. For example:
+  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
+  /// This should be true when it takes more than one instruction to lower
+  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
+  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
+  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
+    return false;
+  }
+
+  /// Return the preferred operand type if the target has a quick way to compare
+  /// integer values of the given size. Assume that any legal integer type can
+  /// be compared efficiently. Targets may override this to allow illegal wide
+  /// types to return a vector type if there is support to compare that type.
+  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
+    MVT VT = MVT::getIntegerVT(NumBits);
+    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
+  }
+
+  /// Return true if the target should transform:
+  /// (X & Y) == Y ---> (~X & Y) == 0
+  /// (X & Y) != Y ---> (~X & Y) != 0
+  ///
+  /// This may be profitable if the target has a bitwise and-not operation that
+  /// sets comparison flags. A target may want to limit the transformation based
+  /// on the type of Y or if Y is a constant.
+  ///
+  /// Note that the transform will not occur if Y is known to be a power-of-2
+  /// because a mask and compare of a single bit can be handled by inverting the
+  /// predicate, for example:
+  /// (X & 8) == 8 ---> (X & 8) != 0
+  virtual bool hasAndNotCompare(SDValue Y) const {
+    return false;
+  }
+
+  /// Return true if the target has a bitwise and-not operation:
+  /// X = ~A & B
+  /// This can be used to simplify select or other instructions.
+  virtual bool hasAndNot(SDValue X) const {
+    // If the target has the more complex version of this operation, assume that
+    // it has this operation too.
+    return hasAndNotCompare(X);
+  }
+
+  /// \brief Return true if the target wants to use the optimization that
+  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
+  /// promotedInst1(...(promotedInstN(ext(load)))).
+  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
+
+  /// Return true if the target can combine store(extractelement VectorTy,
+  /// Idx).
+  /// \p Cost[out] gives the cost of that transformation when this is true.
+  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
+                                         unsigned &Cost) const {
+    return false;
+  }
+
+  /// Return true if target supports floating point exceptions.
+  bool hasFloatingPointExceptions() const {
+    return HasFloatingPointExceptions;
+  }
+
+  /// Return true if the target always benefits from combining into FMA for a
+  /// given value type. This must typically return false on targets where FMA
+  /// takes more cycles to execute than FADD.
+  virtual bool enableAggressiveFMAFusion(EVT VT) const {
+    return false;
+  }
+
+  /// Return the ValueType of the result of SETCC operations.
+  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+                                 EVT VT) const;
+
+  /// Return the ValueType for comparison libcalls. Comparison libcalls include
+  /// floating-point comparison calls, and Ordered/Unordered check calls on
+  /// floating point numbers.
+  virtual
+  MVT::SimpleValueType getCmpLibcallReturnType() const;
+
+  /// For targets without i1 registers, this gives the nature of the high-bits
+  /// of boolean values held in types wider than i1.
+  ///
+  /// "Boolean values" are special true/false values produced by nodes like
+  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
+  /// Not to be confused with general values promoted from i1.  Some cpus
+  /// distinguish between vectors of boolean and scalars; the isVec parameter
+  /// selects between the two kinds.  For example on X86 a scalar boolean should
+  /// be zero extended from i1, while the elements of a vector of booleans
+  /// should be sign extended from i1.
+  ///
+  /// Some cpus also treat floating point types the same way as they treat
+  /// vectors instead of the way they treat scalars.
+  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
+    if (isVec)
+      return BooleanVectorContents;
+    return isFloat ? BooleanFloatContents : BooleanContents;
+  }
+
+  BooleanContent getBooleanContents(EVT Type) const {
+    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
+  }
+
+  /// Return target scheduling preference.
+  Sched::Preference getSchedulingPreference() const {
+    return SchedPreferenceInfo;
+  }
+
+  /// Some schedulers, e.g. hybrid, can switch to different scheduling
+  /// heuristics for different nodes. This function returns the preference (or
+  /// none) for the given node.
+  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
+    return Sched::None;
+  }
+
+  /// Return the register class that should be used for the specified value
+  /// type.
+  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
+    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
+    assert(RC && "This value type is not natively supported!");
+    return RC;
+  }
+
+  /// Return the 'representative' register class for the specified value
+  /// type.
+  ///
+  /// The 'representative' register class is the largest legal super-reg
+  /// register class for the register class of the value type.  For example, on
+  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
+  /// register class is GR64 on x86_64.
+  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
+    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
+    return RC;
+  }
+
+  /// Return the cost of the 'representative' register class for the specified
+  /// value type.
+  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
+    return RepRegClassCostForVT[VT.SimpleTy];
+  }
+
+  /// Return true if the target has native support for the specified value type.
+  /// This means that it has a register that directly holds it without
+  /// promotions or expansions.
+  bool isTypeLegal(EVT VT) const {
+    assert(!VT.isSimple() ||
+           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
+    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
+  }
+
+  class ValueTypeActionImpl {
+    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
+    /// that indicates how instruction selection should deal with the type.
+    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
+
+  public:
+    ValueTypeActionImpl() {
+      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
+                TypeLegal);
+    }
+
+    LegalizeTypeAction getTypeAction(MVT VT) const {
+      return ValueTypeActions[VT.SimpleTy];
+    }
+
+    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
+      ValueTypeActions[VT.SimpleTy] = Action;
+    }
+  };
+
+  const ValueTypeActionImpl &getValueTypeActions() const {
+    return ValueTypeActions;
+  }
+
+  /// Return how we should legalize values of this type, either it is already
+  /// legal (return 'Legal') or we need to promote it to a larger type (return
+  /// 'Promote'), or we need to expand it into multiple registers of smaller
+  /// integer type (return 'Expand').  'Custom' is not an option.
+  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
+    return getTypeConversion(Context, VT).first;
+  }
+  LegalizeTypeAction getTypeAction(MVT VT) const {
+    return ValueTypeActions.getTypeAction(VT);
+  }
+
+  /// For types supported by the target, this is an identity function.  For
+  /// types that must be promoted to larger types, this returns the larger type
+  /// to promote to.  For integer types that are larger than the largest integer
+  /// register, this contains one step in the expansion to get to the smaller
+  /// register. For illegal floating point types, this returns the integer type
+  /// to transform to.
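+  /// For example, on a typical 32-bit target, i64 is transformed to i32, and
+  /// i128 transforms one step at a time, first to i64.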
+  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
+    return getTypeConversion(Context, VT).second;
+  }
+
+  /// For types supported by the target, this is an identity function.  For
+  /// types that must be expanded (i.e. integer types that are larger than the
+  /// largest integer register or illegal floating point types), this returns
+  /// the largest legal type it will be expanded to.
+  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
+    assert(!VT.isVector());
+    while (true) {
+      switch (getTypeAction(Context, VT)) {
+      case TypeLegal:
+        return VT;
+      case TypeExpandInteger:
+        VT = getTypeToTransformTo(Context, VT);
+        break;
+      default:
+        llvm_unreachable("Type is not legal nor is it to be expanded!");
+      }
+    }
+  }
+
+  /// Vector types are broken down into some number of legal first class types.
+  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
+  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
+  /// turns into 4 EVT::i32 values with both PPC and X86.
+  ///
+  /// This method returns the number of registers needed, and the VT for each
+  /// register.  It also returns the VT and quantity of the intermediate values
+  /// before they are promoted/expanded.
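+  ///
+  /// A usage sketch (results are target-dependent; the values shown assume an
+  /// SSE1-like target, and TLI/Ctx denote a TargetLowering instance and an
+  /// LLVMContext):
+  /// \code
+  ///   EVT IntermediateVT;
+  ///   MVT RegisterVT;
+  ///   unsigned NumIntermediates;
+  ///   unsigned NumRegs = TLI.getVectorTypeBreakdown(
+  ///       Ctx, EVT::getVectorVT(Ctx, MVT::f32, 8), IntermediateVT,
+  ///       NumIntermediates, RegisterVT);
+  ///   // On such a target: NumRegs == 2, IntermediateVT == v4f32.
+  /// \endcode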
+  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
+                                  EVT &IntermediateVT,
+                                  unsigned &NumIntermediates,
+                                  MVT &RegisterVT) const;
+
+  /// Certain targets such as MIPS require that some types such as vectors are
+  /// always broken down into scalars in some contexts. This occurs even if the
+  /// vector type is legal.
+  virtual unsigned getVectorTypeBreakdownForCallingConv(
+      LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+      unsigned &NumIntermediates, MVT &RegisterVT) const {
+    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
+                                  RegisterVT);
+  }
+
+  struct IntrinsicInfo {
+    unsigned     opc = 0;          // target opcode
+    EVT          memVT;            // memory VT
+
+    // value representing memory location
+    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
+
+    int          offset = 0;       // offset off of ptrVal
+    unsigned     size = 0;         // the size of the memory location
+                                   // (taken from memVT if zero)
+    unsigned     align = 1;        // alignment
+
+    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
+    IntrinsicInfo() = default;
+  };
+
+  /// Given an intrinsic, checks if on the target the intrinsic will need to map
+  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
+  /// true and stores the intrinsic information into the IntrinsicInfo that was
+  /// passed to the function.
+  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
+                                  MachineFunction &,
+                                  unsigned /*Intrinsic*/) const {
+    return false;
+  }
+
+  /// Returns true if the target can instruction select the specified FP
+  /// immediate natively. If false, the legalizer will materialize the FP
+  /// immediate as a load from a constant pool.
+  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
+    return false;
+  }
+
+  /// Targets can use this to indicate that they only support *some*
+  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
+  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
+  /// legal.
+  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
+    return true;
+  }
+
+  /// Returns true if the operation can trap for the value type.
+  ///
+  /// VT must be a legal type. By default, we optimistically assume most
+  /// operations don't trap except for integer divide and remainder.
+  virtual bool canOpTrap(unsigned Op, EVT VT) const;
+
+  /// Similar to isShuffleMaskLegal. Targets can use this to indicate whether
+  /// there is a suitable VECTOR_SHUFFLE that can be used to replace a VAND
+  /// with a constant pool entry.
+  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
+                                      EVT /*VT*/) const {
+    return false;
+  }
+
+  /// Return how this operation should be treated: either it is legal, needs to
+  /// be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
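+  ///
+  /// Actions are typically configured in the target's TargetLowering
+  /// constructor; a representative (illustrative) sketch:
+  /// \code
+  ///   setOperationAction(ISD::SDIV,  MVT::i32,   Expand);
+  ///   setOperationAction(ISD::FSIN,  MVT::f64,   Expand);
+  ///   setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+  /// \endcode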
+  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
+    if (VT.isExtended()) return Expand;
+    // If a target-specific SDNode requires legalization, require the target
+    // to provide custom legalization for it.
+    if (Op >= array_lengthof(OpActions[0])) return Custom;
+    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal with custom lowering. This is used to help guide high-level
+  /// lowering decisions.
+  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Custom);
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal using promotion. This is used to help guide high-level lowering
+  /// decisions.
+  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Promote);
+  }
+
+  /// Return true if the specified operation is legal on this target or can be
+  /// made legal with custom lowering or using promotion. This is used to help
+  /// guide high-level lowering decisions.
+  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+      (getOperationAction(Op, VT) == Legal ||
+       getOperationAction(Op, VT) == Custom ||
+       getOperationAction(Op, VT) == Promote);
+  }
+
+  /// Return true if the operation uses custom lowering, regardless of whether
+  /// the type is legal or not.
+  bool isOperationCustom(unsigned Op, EVT VT) const {
+    return getOperationAction(Op, VT) == Custom;
+  }
+
+  /// Return true if lowering to a jump table is allowed.
+  virtual bool areJTsAllowed(const Function *Fn) const {
+    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
+      return false;
+
+    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+  }
+
+  /// Check whether the range [Low,High] fits in a machine word.
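+  /// For example, with a 64-bit machine word, [0, 63] gives Range = 64 and
+  /// fits, while [0, 64] gives Range = 65 and does not.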
+  bool rangeFitsInWord(const APInt &Low, const APInt &High,
+                       const DataLayout &DL) const {
+    // FIXME: Using the pointer type doesn't seem ideal.
+    uint64_t BW = DL.getIndexSizeInBits(0u);
+    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
+    return Range <= BW;
+  }
+
+  /// Return true if lowering to a jump table is suitable for a set of case
+  /// clusters which may contain \p NumCases cases and span \p Range values.
+  /// FIXME: This function checks the maximum table size and density, but not
+  /// the minimum size. It would be nice if the minimum size check were also
+  /// combined into this function. Currently, the minimum size check is
+  /// performed in findJumpTable() in SelectionDAGBuilder and
+  /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
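+  ///
+  /// As a worked example: with MinDensity = 40, a set of 40 cases spanning a
+  /// range of 100 values yields 40 * 100 >= 100 * 40, so it is dense enough
+  /// (assuming the range is within the maximum table size).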
+  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
+                                      uint64_t Range) const {
+    const bool OptForSize = SI->getParent()->getParent()->optForSize();
+    const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
+    const unsigned MaxJumpTableSize =
+        OptForSize || getMaximumJumpTableSize() == 0
+            ? UINT_MAX
+            : getMaximumJumpTableSize();
+    // Check whether a range of clusters is dense enough for a jump table.
+    if (Range <= MaxJumpTableSize &&
+        (NumCases * 100 >= Range * MinDensity)) {
+      return true;
+    }
+    return false;
+  }
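+
+  // Worked example (illustrative): with MinDensity = 40, a switch with
+  // NumCases = 100 spread over Range = 400 compares 100 * 100 = 10000 against
+  // 400 * 40 = 16000, so the clusters are too sparse for a jump table.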
+
+  /// Return true if lowering to a bit test is suitable for a set of case
+  /// clusters which contains \p NumDests unique destinations, \p Low and
+  /// \p High as its lowest and highest case values, and expects \p NumCmps
+  /// case value comparisons. Check if the number of destinations, comparison
+  /// metric, and range are all suitable.
+  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
+                             const APInt &Low, const APInt &High,
+                             const DataLayout &DL) const {
+    // FIXME: I don't think NumCmps is the correct metric: a single case and a
+    // range of cases both require only one branch to lower. Just looking at the
+    // number of clusters and destinations should be enough to decide whether to
+    // build bit tests.
+
+    // To lower a range with bit tests, the range must fit the bitwidth of a
+    // machine word.
+    if (!rangeFitsInWord(Low, High, DL))
+      return false;
+
+    // Decide whether it's profitable to lower this range with bit tests. Each
+    // destination requires a bit test and branch, and there is an overall range
+    // check branch. For a small number of clusters, separate comparisons might
+    // be cheaper, and for many destinations, splitting the range might be
+    // better.
+    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
+           (NumDests == 3 && NumCmps >= 6);
+  }
+
+  /// Return true if the specified operation is illegal on this target or
+  /// unlikely to be made legal with custom lowering. This is used to help guide
+  /// high-level lowering decisions.
+  bool isOperationExpand(unsigned Op, EVT VT) const {
+    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+  }
+
+  /// Return true if the specified operation is legal on this target.
+  bool isOperationLegal(unsigned Op, EVT VT) const {
+    return (VT == MVT::Other || isTypeLegal(VT)) &&
+           getOperationAction(Op, VT) == Legal;
+  }
+
+  /// Return how this load with extension should be treated: either it is legal,
+  /// needs to be promoted to a larger size, needs to be expanded to some other
+  /// code sequence, or the target has a custom expander for it.
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
+    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
+           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
+    unsigned Shift = 4 * ExtType;
+    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
+  }
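+
+  // Encoding note (illustrative): each LoadExtActions entry packs one 4-bit
+  // LegalizeAction per extension type into a 16-bit value. Assuming
+  // ISD::SEXTLOAD has enum value 2, its action is read with Shift = 8, i.e.
+  // (LoadExtActions[ValI][MemI] >> 8) & 0xf.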
+
+  /// Return true if the specified load with extension is legal on this target.
+  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
+  }
+
+  /// Return true if the specified load with extension is legal or custom
+  /// on this target.
+  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
+           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
+  }
+
+  /// Return how this store with truncation should be treated: either it is
+  /// legal, needs to be promoted to a larger size, needs to be expanded to some
+  /// other code sequence, or the target has a custom expander for it.
+  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
+    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
+           "Table isn't big enough!");
+    return TruncStoreActions[ValI][MemI];
+  }
+
+  /// Return true if the specified store with truncation is legal on this
+  /// target.
+  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
+    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
+  }
+
+  /// Return true if the specified store with truncation is legal or has a
+  /// custom lowering on this target.
+  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
+    return isTypeLegal(ValVT) &&
+      (getTruncStoreAction(ValVT, MemVT) == Legal ||
+       getTruncStoreAction(ValVT, MemVT) == Custom);
+  }
+
+  /// Return how the indexed load should be treated: either it is legal, needs
+  /// to be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
+  LegalizeAction
+  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
+    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+           "Table isn't big enough!");
+    unsigned Ty = (unsigned)VT.SimpleTy;
+    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
+  }
+
+  /// Return true if the specified indexed load is legal on this target.
+  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
+    return VT.isSimple() &&
+      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
+       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
+  }
+
+  /// Return how the indexed store should be treated: either it is legal, needs
+  /// to be promoted to a larger size, needs to be expanded to some other code
+  /// sequence, or the target has a custom expander for it.
+  LegalizeAction
+  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
+    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+           "Table isn't big enough!");
+    unsigned Ty = (unsigned)VT.SimpleTy;
+    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
+  }
+
+  /// Return true if the specified indexed store is legal on this target.
+  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
+    return VT.isSimple() &&
+      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
+       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
+  }
+
+  /// Return how the condition code should be treated: either it is legal, needs
+  /// to be expanded to some other code sequence, or the target has a custom
+  /// expander for it.
+  LegalizeAction
+  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
+    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
+           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
+           "Table isn't big enough!");
+    // See setCondCodeAction for how this is encoded.
+    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
+    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
+    assert(Action != Promote && "Can't promote condition code!");
+    return Action;
+  }
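+
+  // Worked example (illustrative): for a SimpleTy of 10, the action lives in
+  // CondCodeActions[CC][10 >> 3] == CondCodeActions[CC][1], at bit offset
+  // 4 * (10 & 0x7) == 8 within that 32-bit word.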
+
+  /// Return true if the specified condition code is legal on this target.
+  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
+    return getCondCodeAction(CC, VT) == Legal;
+  }
+
+  /// Return true if the specified condition code is legal or custom on this
+  /// target.
+  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
+    return getCondCodeAction(CC, VT) == Legal ||
+           getCondCodeAction(CC, VT) == Custom;
+  }
+
+  /// If the action for this operation is to promote, this method returns the
+  /// ValueType to promote to.
+  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
+    assert(getOperationAction(Op, VT) == Promote &&
+           "This operation isn't promoted!");
+
+    // See if this has an explicit type specified.
+    std::map<std::pair<unsigned, MVT::SimpleValueType>,
+             MVT::SimpleValueType>::const_iterator PTTI =
+      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
+    if (PTTI != PromoteToType.end()) return PTTI->second;
+
+    assert((VT.isInteger() || VT.isFloatingPoint()) &&
+           "Cannot autopromote this type, add it with AddPromotedToType.");
+
+    MVT NVT = VT;
+    do {
+      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
+      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
+             "Didn't find type to promote to!");
+    } while (!isTypeLegal(NVT) ||
+              getOperationAction(Op, NVT) == Promote);
+    return NVT;
+  }
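+
+  // Illustrative walk: if a target marks (ISD::ADD, MVT::i16) as Promote
+  // without an explicit PromoteToType entry, the loop above steps through the
+  // successively larger simple types until it finds one that is legal and not
+  // itself promoted (typically i32 on 32-bit targets -- an assumption).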
+
+  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
+  /// operations except for the pointer size.  If AllowUnknown is true, this
+  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
+  /// otherwise it will assert.
+  EVT getValueType(const DataLayout &DL, Type *Ty,
+                   bool AllowUnknown = false) const {
+    // Lower scalar pointers to native pointer types.
+    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+      return getPointerTy(DL, PTy->getAddressSpace());
+
+    if (Ty->isVectorTy()) {
+      VectorType *VTy = cast<VectorType>(Ty);
+      Type *Elm = VTy->getElementType();
+      // Lower vectors of pointers to native pointer types.
+      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
+        EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
+        Elm = PointerTy.getTypeForEVT(Ty->getContext());
+      }
+
+      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
+                       VTy->getNumElements());
+    }
+    return EVT::getEVT(Ty, AllowUnknown);
+  }
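+
+  // Illustrative example: on a target with 64-bit pointers (an assumption),
+  // an IR type of <4 x i8*> takes the vector branch above and comes back as
+  // the extended VT v4i64.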
+
+  /// Return the MVT corresponding to this LLVM type. See getValueType.
+  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
+                         bool AllowUnknown = false) const {
+    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
+  }
+
+  /// Return the desired alignment for ByVal or InAlloca aggregate function
+  /// arguments in the caller parameter area.  This is the actual alignment, not
+  /// its logarithm.
+  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
+
+  /// Return the type of registers that this ValueType will eventually require.
+  MVT getRegisterType(MVT VT) const {
+    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
+    return RegisterTypeForVT[VT.SimpleTy];
+  }
+
+  /// Return the type of registers that this ValueType will eventually require.
+  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
+    if (VT.isSimple()) {
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
+                array_lengthof(RegisterTypeForVT));
+      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
+    }
+    if (VT.isVector()) {
+      EVT VT1;
+      MVT RegisterVT;
+      unsigned NumIntermediates;
+      (void)getVectorTypeBreakdown(Context, VT, VT1,
+                                   NumIntermediates, RegisterVT);
+      return RegisterVT;
+    }
+    if (VT.isInteger()) {
+      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
+    }
+    llvm_unreachable("Unsupported extended type!");
+  }
+
+  /// Return the number of registers that this ValueType will eventually
+  /// require.
+  ///
+  /// This is one for any types promoted to live in larger registers, but may be
+  /// more than one for types (like i64) that are split into pieces.  For types
+  /// like i140, which are first promoted then expanded, it is the number of
+  /// registers needed to hold all the bits of the original type.  For an i140
+  /// on a 32 bit machine this means 5 registers.
+  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
+    if (VT.isSimple()) {
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
+                array_lengthof(NumRegistersForVT));
+      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
+    }
+    if (VT.isVector()) {
+      EVT VT1;
+      MVT VT2;
+      unsigned NumIntermediates;
+      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
+    }
+    if (VT.isInteger()) {
+      unsigned BitWidth = VT.getSizeInBits();
+      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
+      return (BitWidth + RegWidth - 1) / RegWidth;
+    }
+    llvm_unreachable("Unsupported extended type!");
+  }
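+
+  // Worked example: for i140 with 32-bit registers (so getRegisterType
+  // returns i32 -- an assumption), the integer branch computes
+  // (140 + 32 - 1) / 32 = 5, matching the comment above.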
+
+  /// Certain combinations of ABIs, Targets and features require that types
+  /// are legal for some operations and not for other operations.
+  /// For MIPS all vector types must be passed through the integer register set.
+  virtual MVT getRegisterTypeForCallingConv(MVT VT) const {
+    return getRegisterType(VT);
+  }
+
+  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+                                            EVT VT) const {
+    return getRegisterType(Context, VT);
+  }
+
+  /// Certain targets require unusual breakdowns of certain types. For MIPS,
+  /// this occurs when a vector type is used, as vectors are passed through the
+  /// integer register set.
+  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                                 EVT VT) const {
+    return getNumRegisters(Context, VT);
+  }
+
+  /// Certain targets have context-sensitive alignment requirements, where one
+  /// type has the alignment requirement of another type.
+  virtual unsigned getABIAlignmentForCallingConv(Type *ArgTy,
+                                                 DataLayout DL) const {
+    return DL.getABITypeAlignment(ArgTy);
+  }
+
+  /// If true, then instruction selection should seek to shrink the FP constant
+  /// of the specified type to a smaller type in order to save space and / or
+  /// reduce runtime.
+  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
+
+  // Return true if it is profitable to reduce the given load node to a smaller
+  // type.
+  //
+  // e.g. (i16 (trunc (i32 (load x)))) -> (i16 (load x)) should be performed
+  virtual bool shouldReduceLoadWidth(SDNode *Load,
+                                     ISD::LoadExtType ExtTy,
+                                     EVT NewVT) const {
+    return true;
+  }
+
+  /// When splitting a value of the specified type into parts, does the Lo
+  /// or Hi part come first?  This usually follows the endianness, except
+  /// for ppcf128, where the Hi part always comes first.
+  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
+    return DL.isBigEndian() || VT == MVT::ppcf128;
+  }
+
+  /// If true, the target has custom DAG combine transformations that it can
+  /// perform for the specified node.
+  bool hasTargetDAGCombine(ISD::NodeType NT) const {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
+  }
+
+  unsigned getGatherAllAliasesMaxDepth() const {
+    return GatherAllAliasesMaxDepth;
+  }
+
+  /// Returns the size of the platform's va_list object.
+  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
+    return getPointerTy(DL).getSizeInBits();
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memset
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memset. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemset(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memcpy
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memcpy. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
+  }
+
+  /// Get maximum # of load operations permitted for memcmp
+  ///
+  /// This function returns the maximum number of load operations permitted
+  /// to replace a call to memcmp. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
+    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
+  }
+
+  /// For memcmp expansion when the memcmp result is only compared equal or
+  /// not-equal to 0, allow up to this number of load pairs per block. As an
+  /// example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
+  ///   a0 = load2bytes &a[0]
+  ///   b0 = load2bytes &b[0]
+  ///   a2 = load1byte  &a[2]
+  ///   b2 = load1byte  &b[2]
+  ///   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
+  virtual unsigned getMemcmpEqZeroLoadsPerBlock() const {
+    return 1;
+  }
+
+  /// \brief Get maximum # of store operations permitted for llvm.memmove
+  ///
+  /// This function returns the maximum number of store operations permitted
+  /// to replace a call to llvm.memmove. The value is set by the target at the
+  /// performance threshold for such a replacement. If OptSize is true,
+  /// return the limit for functions that have OptSize attribute.
+  unsigned getMaxStoresPerMemmove(bool OptSize) const {
+    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
+  }
+
+  /// \brief Determine if the target supports unaligned memory accesses.
+  ///
+  /// This function returns true if the target allows unaligned memory accesses
+  /// of the specified type in the given address space. If true, it also returns
+  /// whether the unaligned memory access is "fast" in the last argument by
+  /// reference. This is used, for example, in situations where an array
+  /// copy/move/set is converted to a sequence of store operations. Its use
+  /// helps to ensure that such replacements don't generate code that causes an
+  /// alignment error (trap) on the target machine.
+  virtual bool allowsMisalignedMemoryAccesses(EVT,
+                                              unsigned AddrSpace = 0,
+                                              unsigned Align = 1,
+                                              bool * /*Fast*/ = nullptr) const {
+    return false;
+  }
+
+  /// Return true if the target supports a memory access of this type for the
+  /// given address space and alignment. If the access is allowed, the optional
+  /// final parameter returns if the access is also fast (as defined by the
+  /// target).
+  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+                          unsigned AddrSpace = 0, unsigned Alignment = 1,
+                          bool *Fast = nullptr) const;
+
+  /// Returns the target specific optimal type for load and store operations as
+  /// a result of memset, memcpy, and memmove lowering.
+  ///
+  /// If DstAlign is zero, it means the destination alignment can satisfy any
+  /// constraint. Similarly, if SrcAlign is zero it means there isn't
+  /// a need to check it against alignment requirement, probably because the
+  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
+  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
+  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
+  /// does not need to be loaded.  It returns EVT::Other if the type should be
+  /// determined using generic target-independent logic.
+  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
+                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
+                                  bool /*IsMemset*/,
+                                  bool /*ZeroMemset*/,
+                                  bool /*MemcpyStrSrc*/,
+                                  MachineFunction &/*MF*/) const {
+    return MVT::Other;
+  }
+
+  /// Returns true if it's safe to use load / store of the specified type to
+  /// expand memcpy / memset inline.
+  ///
+  /// This is mostly true for all types except for some special cases. For
+  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
+  /// fstpl which also does type conversion. Note the specified type doesn't
+  /// have to be legal as the hook is used before type legalization.
+  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
+
+  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
+  bool usesUnderscoreSetJmp() const {
+    return UseUnderscoreSetJmp;
+  }
+
+  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
+  bool usesUnderscoreLongJmp() const {
+    return UseUnderscoreLongJmp;
+  }
+
+  /// Return lower limit for number of blocks in a jump table.
+  virtual unsigned getMinimumJumpTableEntries() const;
+
+  /// Return lower limit of the density in a jump table.
+  unsigned getMinimumJumpTableDensity(bool OptForSize) const;
+
+  /// Return upper limit for number of entries in a jump table.
+  /// Zero if no limit.
+  unsigned getMaximumJumpTableSize() const;
+
+  virtual bool isJumpTableRelative() const {
+    return TM.isPositionIndependent();
+  }
+
+  /// If a physical register, this specifies the register that
+  /// llvm.savestack/llvm.restorestack should save and restore.
+  unsigned getStackPointerRegisterToSaveRestore() const {
+    return StackPointerRegisterToSaveRestore;
+  }
+
+  /// If a physical register, this returns the register that receives the
+  /// exception address on entry to an EH pad.
+  virtual unsigned
+  getExceptionPointerRegister(const Constant *PersonalityFn) const {
+    // 0 is guaranteed to be the NoRegister value on all targets
+    return 0;
+  }
+
+  /// If a physical register, this returns the register that receives the
+  /// exception typeid on entry to a landing pad.
+  virtual unsigned
+  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
+    // 0 is guaranteed to be the NoRegister value on all targets
+    return 0;
+  }
+
+  virtual bool needsFixedCatchObjects() const {
+    report_fatal_error("Funclet EH is not implemented for this target");
+  }
+
+  /// Returns the target's jmp_buf size in bytes (if never set, the default is
+  /// 200)
+  unsigned getJumpBufSize() const {
+    return JumpBufSize;
+  }
+
+  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
+  /// is 0)
+  unsigned getJumpBufAlignment() const {
+    return JumpBufAlignment;
+  }
+
+  /// Return the minimum stack alignment of an argument.
+  unsigned getMinStackArgumentAlignment() const {
+    return MinStackArgumentAlignment;
+  }
+
+  /// Return the minimum function alignment.
+  unsigned getMinFunctionAlignment() const {
+    return MinFunctionAlignment;
+  }
+
+  /// Return the preferred function alignment.
+  unsigned getPrefFunctionAlignment() const {
+    return PrefFunctionAlignment;
+  }
+
+  /// Return the preferred loop alignment.
+  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopAlignment;
+  }
+
+  /// If the target has a standard location for the stack protector guard,
+  /// returns the address of that location. Otherwise, returns nullptr.
+  /// DEPRECATED: please override useLoadStackGuardNode and customize
+  ///             LOAD_STACK_GUARD, or customize @llvm.stackguard().
+  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
+
+  /// Inserts necessary declarations for SSP (stack protection) purpose.
+  /// Should be used only when getIRStackGuard returns nullptr.
+  virtual void insertSSPDeclarations(Module &M) const;
+
+  /// Return the variable that's previously inserted by insertSSPDeclarations,
+  /// if any, otherwise return nullptr. Should be used only when
+  /// getIRStackGuard returns nullptr.
+  virtual Value *getSDagStackGuard(const Module &M) const;
+
+  /// If this function returns true, stack protection checks should XOR the
+  /// frame pointer (or whichever pointer is used to address locals) into the
+  /// stack guard value before checking it. getIRStackGuard must return nullptr
+  /// if this returns true.
+  virtual bool useStackGuardXorFP() const { return false; }
+
+  /// If the target has a standard stack protection check function that
+  /// performs validation and error handling, returns the function. Otherwise,
+  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
+  /// Should be used only when getIRStackGuard returns nullptr.
+  virtual Value *getSSPStackGuardCheck(const Module &M) const;
+
+protected:
+  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
+                                            bool UseTLS) const;
+
+public:
+  /// Returns the target-specific address of the unsafe stack pointer.
+  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
+
+  /// Returns the name of the symbol used to emit stack probes or the empty
+  /// string if not applicable.
+  virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const {
+    return "";
+  }
+
+  /// Returns true if a cast between SrcAS and DestAS is a noop.
+  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+    return false;
+  }
+
+  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
+  /// are happy to sink it into basic blocks.
+  virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+    return isNoopAddrSpaceCast(SrcAS, DestAS);
+  }
+
+  /// Return true if the pointer arguments to CI should be aligned by aligning
+  /// the object whose address is being passed. If so then MinSize is set to the
+  /// minimum size the object must be to be aligned and PrefAlign is set to the
+  /// preferred alignment.
+  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+                                      unsigned & /*PrefAlign*/) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// \name Helpers for TargetTransformInfo implementations
+  /// @{
+
+  /// Get the ISD node that corresponds to the Instruction class opcode.
+  int InstructionOpcodeToISD(unsigned Opcode) const;
+
+  /// Estimate the cost of type-legalization and the legalized type.
+  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
+                                              Type *Ty) const;
+
+  /// @}
+
+  //===--------------------------------------------------------------------===//
+  /// \name Helpers for atomic expansion.
+  /// @{
+
+  /// Returns the maximum atomic operation size (in bits) supported by
+  /// the backend. Atomic operations greater than this size (as well
+  /// as ones that are not naturally aligned), will be expanded by
+  /// AtomicExpandPass into an __atomic_* library call.
+  unsigned getMaxAtomicSizeInBitsSupported() const {
+    return MaxAtomicSizeInBitsSupported;
+  }
+
+  /// Returns the size of the smallest cmpxchg or ll/sc instruction
+  /// the backend supports.  Any smaller operations are widened in
+  /// AtomicExpandPass.
+  ///
+  /// Note that *unlike* operations above the maximum size, atomic ops
+  /// are still natively supported below the minimum; they just
+  /// require a more complex expansion.
+  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
+
+  /// Whether the target supports unaligned atomic operations.
+  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
+
+  /// Whether AtomicExpandPass should automatically insert fences and reduce
+  /// ordering for this atomic. This should be true for most architectures with
+  /// weak memory ordering. Defaults to false.
+  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
+    return false;
+  }
+
+  /// Perform a load-linked operation on Addr, returning a "Value *" with the
+  /// corresponding pointee type. This may entail some non-trivial operations to
+  /// truncate or reconstruct types that will be illegal in the backend. See
+  /// ARMISelLowering for an example implementation.
+  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+                                AtomicOrdering Ord) const {
+    llvm_unreachable("Load linked unimplemented on this target");
+  }
+
+  /// Perform a store-conditional operation to Addr. Return the status of the
+  /// store. This should be 0 if the store succeeded, non-zero otherwise.
+  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+                                      Value *Addr, AtomicOrdering Ord) const {
+    llvm_unreachable("Store conditional unimplemented on this target");
+  }
+
+  /// Inserts in the IR a target-specific intrinsic specifying a fence.
+  /// It is called by AtomicExpandPass before expanding an
+  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
+  ///   if shouldInsertFencesForAtomic returns true.
+  ///
+  /// Inst is the original atomic instruction, prior to other expansions that
+  /// may be performed.
+  ///
+  /// This function should either return nullptr, or a pointer to an IR-level
+  ///   Instruction*. Even complex fence sequences can be represented by a
+  ///   single Instruction* through an intrinsic to be lowered later.
+  /// Backends should override this method to produce target-specific
+  ///   intrinsics for their fences.
+  /// FIXME: Please note that the default implementation here in terms of
+  ///   IR-level fences exists for historical/compatibility reasons and is
+  ///   *unsound*! Fences cannot, in general, be used to restore sequential
+  ///   consistency. Consider the following example:
+  /// atomic<int> x = y = 0;
+  /// int r1, r2, r3, r4;
+  /// Thread 0:
+  ///   x.store(1);
+  /// Thread 1:
+  ///   y.store(1);
+  /// Thread 2:
+  ///   r1 = x.load();
+  ///   r2 = y.load();
+  /// Thread 3:
+  ///   r3 = y.load();
+  ///   r4 = x.load();
+  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
+  ///  seq_cst. But if they are lowered to monotonic accesses, no amount of
+  ///  IR-level fences can prevent it.
+  /// @{
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                        AtomicOrdering Ord) const {
+    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
+      return Builder.CreateFence(Ord);
+    else
+      return nullptr;
+  }
+
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         Instruction *Inst,
+                                         AtomicOrdering Ord) const {
+    if (isAcquireOrStronger(Ord))
+      return Builder.CreateFence(Ord);
+    else
+      return nullptr;
+  }
+  /// @}
+
+  // Emits code that executes when the comparison result in the ll/sc
+  // expansion of a cmpxchg instruction is such that the store-conditional will
+  // not execute.  This makes it possible to balance out the load-linked with
+  // a dedicated instruction, if desired.
+  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
+  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
+  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
+
+  /// Returns true if the given (atomic) store should be expanded by the
+  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
+  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+    return false;
+  }
+
+  /// Returns true if arguments should be sign-extended in lib calls.
+  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+    return IsSigned;
+  }
+
+  /// Returns how the given (atomic) load should be expanded by the
+  /// IR-level AtomicExpand pass.
+  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+    return AtomicExpansionKind::None;
+  }
+
+  /// Returns true if the given atomic cmpxchg should be expanded by the
+  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
+  /// (through emitLoadLinked() and emitStoreConditional()).
+  virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+    return false;
+  }
+
+  /// Returns how the IR-level AtomicExpand pass should expand the given
+  /// AtomicRMW, if at all. Default is to never expand.
+  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+    return AtomicExpansionKind::None;
+  }
+
+  /// On some platforms, an AtomicRMW that never actually modifies the value
+  /// (such as fetch_add of 0) can be turned into a fence followed by an
+  /// atomic load. This may sound useless, but it makes it possible for the
+  /// processor to keep the cacheline shared, dramatically improving
+  /// performance. And such idempotent RMWs are useful for implementing some
+  /// kinds of locks, see for example (justification + benchmarks):
+  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+  /// This method tries doing that transformation, returning the atomic load if
+  /// it succeeds, and nullptr otherwise.
+  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
+  /// another round of expansion.
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+    return nullptr;
+  }
+
+  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
+  /// SIGN_EXTEND, or ANY_EXTEND).
+  virtual ISD::NodeType getExtendForAtomicOps() const {
+    return ISD::ZERO_EXTEND;
+  }
+
+  /// @}
+
+  /// Returns true if we should normalize
+  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
+  /// that it saves us from materializing N0 and N1 in an integer register.
+  /// Targets that are able to perform and/or on flags should return false here.
+  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
+                                               EVT VT) const {
+    // If a target has multiple condition registers, then it likely has logical
+    // operations on those registers.
+    if (hasMultipleConditionRegisters())
+      return false;
+    // Only do the transform if the value won't be split into multiple
+    // registers.
+    LegalizeTypeAction Action = getTypeAction(Context, VT);
+    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
+      Action != TypeSplitVector;
+  }
+
+  /// Return true if a select of constants (select Cond, C1, C2) should be
+  /// transformed into simple math ops with the condition value. For example:
+  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
+  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // TargetLowering Configuration Methods - These methods should be invoked by
+  // the derived class constructor to configure this object for the target.
+  //
+protected:
+  /// Specify how the target extends the result of integer and floating point
+  /// boolean values from i1 to a wider type.  See getBooleanContents.
+  void setBooleanContents(BooleanContent Ty) {
+    BooleanContents = Ty;
+    BooleanFloatContents = Ty;
+  }
+
+  /// Specify how the target extends the result of integer and floating point
+  /// boolean values from i1 to a wider type.  See getBooleanContents.
+  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
+    BooleanContents = IntTy;
+    BooleanFloatContents = FloatTy;
+  }
+
+  /// Specify how the target extends the result of a vector boolean value from a
+  /// vector of i1 to a wider type.  See getBooleanContents.
+  void setBooleanVectorContents(BooleanContent Ty) {
+    BooleanVectorContents = Ty;
+  }
+
+  /// Specify the target scheduling preference.
+  void setSchedulingPreference(Sched::Preference Pref) {
+    SchedPreferenceInfo = Pref;
+  }
+
+  /// Indicate whether this target prefers to use _setjmp to implement
+  /// llvm.setjmp or the version without _.  Defaults to false.
+  void setUseUnderscoreSetJmp(bool Val) {
+    UseUnderscoreSetJmp = Val;
+  }
+
+  /// Indicate whether this target prefers to use _longjmp to implement
+  /// llvm.longjmp or the version without _.  Defaults to false.
+  void setUseUnderscoreLongJmp(bool Val) {
+    UseUnderscoreLongJmp = Val;
+  }
+
+  /// Indicate the minimum number of blocks to generate jump tables.
+  void setMinimumJumpTableEntries(unsigned Val);
+
+  /// Indicate the maximum number of entries in jump tables.
+  /// Set to zero to generate unlimited jump tables.
+  void setMaximumJumpTableSize(unsigned);
+
+  /// If set to a physical register, this specifies the register that
+  /// llvm.savestack/llvm.restorestack should save and restore.
+  void setStackPointerRegisterToSaveRestore(unsigned R) {
+    StackPointerRegisterToSaveRestore = R;
+  }
+
+  /// Tells the code generator that the target has multiple (allocatable)
+  /// condition registers that can be used to store the results of comparisons
+  /// for use by selects and conditional branches. With multiple condition
+  /// registers, the code generator will not aggressively sink comparisons into
+  /// the blocks of their users.
+  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
+    HasMultipleConditionRegisters = hasManyRegs;
+  }
+
+  /// Tells the code generator that the target has BitExtract instructions.
+  /// The code generator will aggressively sink "shift"s into the blocks of
+  /// their users if the users will generate "and" instructions which can be
+  /// combined with "shift" to BitExtract instructions.
+  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
+    HasExtractBitsInsn = hasExtractInsn;
+  }
+
+  /// Tells the code generator not to expand logic operations on comparison
+  /// predicates into separate sequences that increase the amount of flow
+  /// control.
+  void setJumpIsExpensive(bool isExpensive = true);
+
+  /// Tells the code generator that this target supports floating point
+  /// exceptions and cares about preserving floating point exception behavior.
+  void setHasFloatingPointExceptions(bool FPExceptions = true) {
+    HasFloatingPointExceptions = FPExceptions;
+  }
+
+  /// Tells the code generator which bitwidths to bypass.
+  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+  }
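+
+  // Illustrative use in a target constructor (widths are an example): a
+  // target with a slow 64-bit divider but a fast 32-bit one could call
+  //   addBypassSlowDiv(64, 32);
+  // so that small 64-bit divides are dispatched to the 32-bit path at run
+  // time.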
+
+  /// Add the specified register class as an available regclass for the
+  /// specified value type. This indicates the selector can handle values of
+  /// that class natively.
+  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
+    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
+    RegClassForVT[VT.SimpleTy] = RC;
+  }
+
+  /// Return the largest legal super-reg register class of the register class
+  /// for the specified type and its associated "cost".
+  virtual std::pair<const TargetRegisterClass *, uint8_t>
+  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
+
+  /// Once all of the register classes are added, this allows us to compute
+  /// derived properties we expose.
+  void computeRegisterProperties(const TargetRegisterInfo *TRI);
+
+  /// Indicate that the specified operation does not work with the specified
+  /// type and indicate what to do about it. Note that VT may refer to either
+  /// the type of a result or that of an operand of Op.
+  void setOperationAction(unsigned Op, MVT VT,
+                          LegalizeAction Action) {
+    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
+    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
+  }
+
+  /// Indicate that the specified load with extension does not work with the
+  /// specified type and indicate what to do about it.
+  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
+                        LegalizeAction Action) {
+    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
+           MemVT.isValid() && "Table isn't big enough!");
+    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+    unsigned Shift = 4 * ExtType;
+    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
+    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
+  }
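+
+  // Illustrative use in a target constructor (types chosen as an example):
+  // a target with no native sign-extending i1 loads might write
+  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);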
+
+  /// Indicate that the specified truncating store does not work with the
+  /// specified type and indicate what to do about it.
+  void setTruncStoreAction(MVT ValVT, MVT MemVT,
+                           LegalizeAction Action) {
+    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
+    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
+  }
+
+  /// Indicate that the specified indexed load does or does not work with the
+  /// specified type and indicate what to do about it.
+  ///
+  /// NOTE: All indexed mode loads are initialized to Expand in
+  /// TargetLowering.cpp
+  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
+                            LegalizeAction Action) {
+    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+           (unsigned)Action < 0xf && "Table isn't big enough!");
+    // Load actions are kept in the upper half.
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
+  }
+
+  /// Indicate that the specified indexed store does or does not work with the
+  /// specified type and indicate what to do about it.
+  ///
+  /// NOTE: All indexed mode stores are initialized to Expand in
+  /// TargetLowering.cpp
+  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
+                             LegalizeAction Action) {
+    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+           (unsigned)Action < 0xf && "Table isn't big enough!");
+    // Store actions are kept in the lower half.
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
+    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
+  }
+
+  /// Indicate that the specified condition code is or isn't supported on the
+  /// target and indicate what to do about it.
+  void setCondCodeAction(ISD::CondCode CC, MVT VT,
+                         LegalizeAction Action) {
+    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
+           "Table isn't big enough!");
+    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+    // The lower 3 bits of the SimpleTy index into the Nth 4-bit set from the
+    // 32-bit value and the upper 29 bits index into the second dimension of
+    // the array to select which 32-bit value to use.
+    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
+    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
+  }
+
+  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
+  /// to trying a larger integer/fp until it can find one that works. If that
+  /// default is insufficient, this method can be used by the target to override
+  /// the default.
+  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
+    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
+  }
+
+  /// Convenience method to set an operation to Promote and specify the type
+  /// in a single call.
+  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
+    setOperationAction(Opc, OrigVT, Promote);
+    AddPromotedToType(Opc, OrigVT, DestVT);
+  }
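+
+  // Illustrative use (opcode/types are an example): perform a vector AND in
+  // a wider legal type instead of marking it illegal outright:
+  //   setOperationPromotedToType(ISD::AND, MVT::v4i32, MVT::v2i64);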
+
+  /// Targets should invoke this method for each target independent node that
+  /// they want to provide a custom DAG combiner for by implementing the
+  /// PerformDAGCombine virtual method.
+  void setTargetDAGCombine(ISD::NodeType NT) {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
+  }
+
+  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
+  void setJumpBufSize(unsigned Size) {
+    JumpBufSize = Size;
+  }
+
+  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
+  /// 0
+  void setJumpBufAlignment(unsigned Align) {
+    JumpBufAlignment = Align;
+  }
+
+  /// Set the target's minimum function alignment (in log2(bytes))
+  void setMinFunctionAlignment(unsigned Align) {
+    MinFunctionAlignment = Align;
+  }
+
+  /// Set the target's preferred function alignment.  This should be set if
+  /// there is a performance benefit to higher-than-minimum alignment (in
+  /// log2(bytes))
+  void setPrefFunctionAlignment(unsigned Align) {
+    PrefFunctionAlignment = Align;
+  }
+
+  /// Set the target's preferred loop alignment. The default alignment of zero
+  /// means the target does not care about loop alignment.  The alignment is
+  /// specified in log2(bytes). The target may also override
+  /// getPrefLoopAlignment to provide per-loop values.
+  void setPrefLoopAlignment(unsigned Align) {
+    PrefLoopAlignment = Align;
+  }
+
+  /// Set the minimum stack alignment of an argument (in log2(bytes)).
+  void setMinStackArgumentAlignment(unsigned Align) {
+    MinStackArgumentAlignment = Align;
+  }
+
+  /// Set the maximum atomic operation size supported by the
+  /// backend. Atomic operations greater than this size (as well as
+  /// ones that are not naturally aligned), will be expanded by
+  /// AtomicExpandPass into an __atomic_* library call.
+  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
+    MaxAtomicSizeInBitsSupported = SizeInBits;
+  }
+
+  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
+  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
+    MinCmpXchgSizeInBits = SizeInBits;
+  }
+
+  /// Sets whether unaligned atomic operations are supported.
+  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
+    SupportsUnalignedAtomics = UnalignedSupported;
+  }
+
+public:
+  //===--------------------------------------------------------------------===//
+  // Addressing mode description hooks (used by LSR etc).
+  //
+
+  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
+  /// instructions reading the address. This allows as much computation as
+  /// possible to be done in the address mode for that operand. This hook lets
+  /// targets also pass back when this should be done on intrinsics which
+  /// load/store.
+  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
+                                    SmallVectorImpl<Value*> &/*Ops*/,
+                                    Type *&/*AccessTy*/) const {
+    return false;
+  }
+
+  /// This represents an addressing mode of:
+  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+  /// If BaseGV is null,  there is no BaseGV.
+  /// If BaseOffs is zero, there is no base offset.
+  /// If HasBaseReg is false, there is no base register.
+  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
+  /// no scale.
+  struct AddrMode {
+    GlobalValue *BaseGV = nullptr;
+    int64_t      BaseOffs = 0;
+    bool         HasBaseReg = false;
+    int64_t      Scale = 0;
+    AddrMode() = default;
+  };
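+
+  // Illustrative example: an address of the form [GV + 8 + %base + 4*%idx]
+  // would be described as { BaseGV = GV, BaseOffs = 8, HasBaseReg = true,
+  // Scale = 4 }.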
+
+  /// Return true if the addressing mode represented by AM is legal for this
+  /// target, for a load/store of the specified type.
+  ///
+  /// The type may be VoidTy, in which case only return true if the addressing
+  /// mode is legal for a load/store of any legal type.  TODO: Handle
+  /// pre/postinc as well.
+  ///
+  /// If the address space cannot be determined, it will be -1.
+  ///
+  /// TODO: Remove default argument
+  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
+                                     Type *Ty, unsigned AddrSpace,
+                                     Instruction *I = nullptr) const;
+
+  /// \brief Return the cost of the scaling factor used in the addressing mode
+  /// represented by AM for this target, for a load/store of the specified type.
+  ///
+  /// If the AM is supported, the return value must be >= 0.
+  /// If the AM is not supported, it returns a negative value.
+  /// TODO: Handle pre/postinc as well.
+  /// TODO: Remove default argument
+  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
+                                   Type *Ty, unsigned AS = 0) const {
+    // Default: assume that any scaling factor used in a legal AM is free.
+    if (isLegalAddressingMode(DL, AM, Ty, AS))
+      return 0;
+    return -1;
+  }
+
+  /// Return true if the specified immediate is a legal icmp immediate, that is,
+  /// the target has icmp instructions which can compare a register against the
+  /// immediate without having to materialize the immediate into a register.
+  virtual bool isLegalICmpImmediate(int64_t) const {
+    return true;
+  }
+
+  /// Return true if the specified immediate is a legal add immediate, that is,
+  /// the target has add instructions which can add a register with the immediate
+  /// without having to materialize the immediate into a register.
+  virtual bool isLegalAddImmediate(int64_t) const {
+    return true;
+  }
+
+  /// Return true if it's significantly cheaper to shift a vector by a uniform
+  /// scalar than by an amount which will vary across each lane. On x86, for
+  /// example, there is a "psllw" instruction for the former case, but no simple
+  /// instruction for a general "a << b" operation on vectors.
+  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
+    return false;
+  }
+
+  /// Returns true if the opcode is a commutative binary operation.
+  virtual bool isCommutativeBinOp(unsigned Opcode) const {
+    // FIXME: This should get its info from the td file.
+    switch (Opcode) {
+    case ISD::ADD:
+    case ISD::SMIN:
+    case ISD::SMAX:
+    case ISD::UMIN:
+    case ISD::UMAX:
+    case ISD::MUL:
+    case ISD::MULHU:
+    case ISD::MULHS:
+    case ISD::SMUL_LOHI:
+    case ISD::UMUL_LOHI:
+    case ISD::FADD:
+    case ISD::FMUL:
+    case ISD::AND:
+    case ISD::OR:
+    case ISD::XOR:
+    case ISD::SADDO:
+    case ISD::UADDO:
+    case ISD::ADDC:
+    case ISD::ADDE:
+    case ISD::FMINNUM:
+    case ISD::FMAXNUM:
+    case ISD::FMINNAN:
+    case ISD::FMAXNAN:
+      return true;
+    default: return false;
+    }
+  }
+
+  /// Return true if it's free to truncate a value of type FromTy to type
+  /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
+  /// by referencing its sub-register AX.
+  /// Targets must return false when FromTy <= ToTy.
+  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
+  /// whether a call is in tail position. Typically this means that both results
+  /// would be assigned to the same register or stack slot, but it could mean
+  /// the target performs adequate checks of its own before proceeding with the
+  /// tail call.  Targets must return false when FromTy <= ToTy.
+  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
+    return false;
+  }
+
+  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
+
+  /// Return true if the extension represented by \p I is free.
+  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
+  /// this method can use the context provided by \p I to decide
+  /// whether or not \p I is free.
+  /// This method extends the behavior of the is[Z|FP]ExtFree family.
+  /// In other words, if is[Z|FP]ExtFree returns true, then this method
+  /// returns true as well. The converse is not true.
+  /// The target can perform the adequate checks by overriding isExtFreeImpl.
+  /// \pre \p I must be a sign, zero, or fp extension.
+  bool isExtFree(const Instruction *I) const {
+    switch (I->getOpcode()) {
+    case Instruction::FPExt:
+      if (isFPExtFree(EVT::getEVT(I->getType()),
+                      EVT::getEVT(I->getOperand(0)->getType())))
+        return true;
+      break;
+    case Instruction::ZExt:
+      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
+        return true;
+      break;
+    case Instruction::SExt:
+      break;
+    default:
+      llvm_unreachable("Instruction is not an extension");
+    }
+    return isExtFreeImpl(I);
+  }
+
+  /// Return true if \p Load and \p Ext can form an ExtLoad.
+  /// For example, in AArch64
+  ///   %L = load i8, i8* %ptr
+  ///   %E = zext i8 %L to i32
+  /// can be lowered into one load instruction
+  ///   ldrb w0, [x0]
+  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
+                 const DataLayout &DL) const {
+    EVT VT = getValueType(DL, Ext->getType());
+    EVT LoadVT = getValueType(DL, Load->getType());
+
+    // If the load has other users and the truncate is not free, the ext
+    // probably isn't free.
+    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
+        !isTruncateFree(Ext->getType(), Load->getType()))
+      return false;
+
+    // Check whether the target supports casts folded into loads.
+    unsigned LType;
+    if (isa<ZExtInst>(Ext))
+      LType = ISD::ZEXTLOAD;
+    else {
+      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
+      LType = ISD::SEXTLOAD;
+    }
+
+    return isLoadExtLegal(LType, VT, LoadVT);
+  }
+
+  /// Return true if any actual instruction that defines a value of type FromTy
+  /// implicitly zero-extends the value to ToTy in the result register.
+  ///
+  /// The function should return true when it is likely that the truncate can
+  /// be freely folded with an instruction defining a value of FromTy. If
+  /// the defining instruction is unknown (because you're looking at a
+  /// function argument, PHI, etc.) then the target may require an
+  /// explicit truncate, which is not necessarily free, but this function
+  /// does not deal with those cases.
+  /// Targets must return false when FromTy >= ToTy.
+  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
+    return false;
+  }
+
+  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
+    return false;
+  }
+
+  /// Return true if the target supplies and combines to a paired load
+  /// two loaded values of type LoadedType next to each other in memory.
+  /// RequiredAlignment gives the minimal alignment constraints that must be met
+  /// to be able to select this paired load.
+  ///
+  /// This information is *not* used to generate actual paired loads, but it is
+  /// used to generate a sequence of loads that is easier to combine into a
+  /// paired load.
+  /// For instance, something like this:
+  /// a = load i64* addr
+  /// b = trunc i64 a to i32
+  /// c = lshr i64 a, 32
+  /// d = trunc i64 c to i32
+  /// will be optimized into:
+  /// b = load i32* addr1
+  /// d = load i32* addr2
+  /// Where addr1 = addr2 +/- sizeof(i32).
+  ///
+  /// In other words, unless the target performs a post-isel load combining,
+  /// this information should not be provided because it will generate more
+  /// loads.
+  virtual bool hasPairedLoad(EVT /*LoadedType*/,
+                             unsigned & /*RequiredAlignment*/) const {
+    return false;
+  }
+
+  /// Return true if the target has a vector blend instruction.
+  virtual bool hasVectorBlend() const { return false; }
+
+  /// \brief Get the maximum supported factor for interleaved memory accesses.
+  /// Default to be the minimum interleave factor: 2.
+  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
+
+  /// \brief Lower an interleaved load to target specific intrinsics. Return
+  /// true on success.
+  ///
+  /// \p LI is the vector load instruction.
+  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
+  /// \p Indices is the corresponding indices for each shufflevector.
+  /// \p Factor is the interleave factor.
+  virtual bool lowerInterleavedLoad(LoadInst *LI,
+                                    ArrayRef<ShuffleVectorInst *> Shuffles,
+                                    ArrayRef<unsigned> Indices,
+                                    unsigned Factor) const {
+    return false;
+  }
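+
+  // Illustrative sketch (not from the original header): for Factor == 2 this
+  // hook sees a wide load plus one de-interleaving shufflevector per
+  // component, e.g. in IR:
+  //   %wide = load <8 x i32>, <8 x i32>* %ptr
+  //   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
+  //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  //   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
+  //                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  // Here Shuffles = {%even, %odd} and Indices = {0, 1}.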
+
+  /// \brief Lower an interleaved store to target specific intrinsics. Return
+  /// true on success.
+  ///
+  /// \p SI is the vector store instruction.
+  /// \p SVI is the shufflevector to RE-interleave the stored vector.
+  /// \p Factor is the interleave factor.
+  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+                                     unsigned Factor) const {
+    return false;
+  }
+
+  /// Return true if zero-extending the specific node Val to type VT2 is free
+  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
+  /// because it's folded such as X86 zero-extending loads).
+  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
+    return isZExtFree(Val.getValueType(), VT2);
+  }
+
+  /// Return true if an fpext operation is free (for instance, because
+  /// single-precision floating-point numbers are implicitly extended to
+  /// double-precision).
+  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
+    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
+           "invalid fpext types");
+    return false;
+  }
+
+  /// Return true if an fpext operation input to an \p Opcode operation is free
+  /// (for instance, because half-precision floating-point numbers are
+  /// implicitly extended to single-precision) for an FMA instruction.
+  virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const {
+    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
+           "invalid fpext types");
+    return isFPExtFree(DestVT, SrcVT);
+  }
+
+  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
+  /// extend node) is profitable.
+  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
+
+  /// Return true if an fneg operation is free to the point where it is never
+  /// worthwhile to replace it with a bitwise operation.
+  virtual bool isFNegFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
+    return false;
+  }
+
+  /// Return true if an fabs operation is free to the point where it is never
+  /// worthwhile to replace it with a bitwise operation.
+  virtual bool isFAbsFree(EVT VT) const {
+    assert(VT.isFloatingPoint());
+    return false;
+  }
+
+  /// Return true if an FMA operation is faster than a pair of fmul and fadd
+  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
+  /// method returns true; otherwise fmuladd is expanded to fmul + fadd.
+  ///
+  /// NOTE: This may be called before legalization on types for which FMAs are
+  /// not legal, but should return true if those types will eventually legalize
+  /// to types that support FMAs. After legalization, it will only be called on
+  /// types that support FMAs (via Legal or Custom actions).
+  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
+    return false;
+  }
+
+  /// Return true if it's profitable to narrow operations of type VT1 to
+  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
+  /// i32 to i16.
+  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
+    return false;
+  }
+
+  /// \brief Return true if it is beneficial to convert a load of a constant to
+  /// just the constant itself.
+  /// On some targets it might be more efficient to use a combination of
+  /// arithmetic instructions to materialize the constant instead of loading it
+  /// from a constant pool.
+  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+                                                 Type *Ty) const {
+    return false;
+  }
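+
+  // Illustrative sketch (not from the original header): on AArch64, for
+  // example, many immediates can be built with a short MOVZ/MOVK sequence,
+  // which is typically cheaper than a constant-pool load. A hypothetical
+  // override accepting anything that fits one move-immediate might read:
+  //   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+  //                                          Type *Ty) const override {
+  //     return Imm.getMinSignedBits() <= 16;
+  //   }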
+
+  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
+  /// from this source type with this index. This is needed because
+  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
+  /// the first element, and only the target knows which lowering is cheap.
+  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
+                                       unsigned Index) const {
+    return false;
+  }
+
+  /// Return true if it is profitable to use a scalar input to a BUILD_VECTOR
+  /// even if the vector itself has multiple uses.
+  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+    return false;
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Runtime Library hooks
+  //
+
+  /// Rename the default libcall routine name for the specified libcall.
+  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
+    LibcallRoutineNames[Call] = Name;
+  }
+
+  /// Get the libcall routine name for the specified libcall.
+  const char *getLibcallName(RTLIB::Libcall Call) const {
+    return LibcallRoutineNames[Call];
+  }
+
+  /// Override the default CondCode to be used to test the result of the
+  /// comparison libcall against zero.
+  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
+    CmpLibcallCCs[Call] = CC;
+  }
+
+  /// Get the CondCode that's to be used to test the result of the comparison
+  /// libcall against zero.
+  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
+    return CmpLibcallCCs[Call];
+  }
+
+  /// Set the CallingConv that should be used for the specified libcall.
+  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
+    LibcallCallingConvs[Call] = CC;
+  }
+
+  /// Get the CallingConv that should be used for the specified libcall.
+  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
+    return LibcallCallingConvs[Call];
+  }
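+
+  // Illustrative sketch (not from the original header): a target constructor
+  // might configure its libcall environment with these hooks; the routine
+  // name below is made up for illustration:
+  //   setLibcallName(RTLIB::SREM_I64, "__myabi_srem64");
+  //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
+  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);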
+
+  /// Execute target specific actions to finalize target lowering.
+  /// This is used to set extra flags in MachineFrameInfo and to freeze the
+  /// set of reserved registers.
+  /// The default implementation just freezes the set of reserved registers.
+  virtual void finalizeLowering(MachineFunction &MF) const;
+
+private:
+  const TargetMachine &TM;
+
+  /// Tells the code generator that the target has multiple (allocatable)
+  /// condition registers that can be used to store the results of comparisons
+  /// for use by selects and conditional branches. With multiple condition
+  /// registers, the code generator will not aggressively sink comparisons into
+  /// the blocks of their users.
+  bool HasMultipleConditionRegisters;
+
+  /// Tells the code generator that the target has BitExtract instructions.
+  /// The code generator will aggressively sink "shift"s into the blocks of
+  /// their users if the users will generate "and" instructions which can be
+  /// combined with "shift" to BitExtract instructions.
+  bool HasExtractBitsInsn;
+
+  /// Tells the code generator to bypass slow divide or remainder
+  /// instructions. For example, setting BypassSlowDivWidths[32] = 8 tells the
+  /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
+  /// integer div/rem when the operands are positive and less than 256.
+  DenseMap<unsigned, unsigned> BypassSlowDivWidths;
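+
+  // Illustrative note (not from the original header): targets populate this
+  // map via TargetLoweringBase::addBypassSlowDiv in their constructors, e.g.
+  //   addBypassSlowDiv(32, 8); // try 8-bit div/rem before 32-bit div/rem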
+
+  /// Tells the code generator that it shouldn't generate extra flow control
+  /// instructions and should attempt to combine flow control instructions via
+  /// predication.
+  bool JumpIsExpensive;
+
+  /// Whether the target supports or cares about preserving floating point
+  /// exception behavior.
+  bool HasFloatingPointExceptions;
+
+  /// This target prefers to use _setjmp to implement llvm.setjmp.
+  ///
+  /// Defaults to false.
+  bool UseUnderscoreSetJmp;
+
+  /// This target prefers to use _longjmp to implement llvm.longjmp.
+  ///
+  /// Defaults to false.
+  bool UseUnderscoreLongJmp;
+
+  /// Information about the contents of the high-bits in boolean values held in
+  /// a type wider than i1. See getBooleanContents.
+  BooleanContent BooleanContents;
+
+  /// Information about the contents of the high-bits in boolean values
+  /// produced by floating-point comparisons, held in a type wider than i1.
+  /// See getBooleanContents.
+  BooleanContent BooleanFloatContents;
+
+  /// Information about the contents of the high-bits in boolean vector values
+  /// when the element type is wider than i1. See getBooleanContents.
+  BooleanContent BooleanVectorContents;
+
+  /// The target scheduling preference: shortest possible total cycles or lowest
+  /// register usage.
+  Sched::Preference SchedPreferenceInfo;
+
+  /// The size, in bytes, of the target's jmp_buf buffers
+  unsigned JumpBufSize;
+
+  /// The alignment, in bytes, of the target's jmp_buf buffers
+  unsigned JumpBufAlignment;
+
+  /// The minimum alignment that any argument on the stack needs to have.
+  unsigned MinStackArgumentAlignment;
+
+  /// The minimum function alignment (used when optimizing for size, and to
+  /// prevent explicitly provided alignment from leading to incorrect code).
+  unsigned MinFunctionAlignment;
+
+  /// The preferred function alignment (used when alignment unspecified and
+  /// optimizing for speed).
+  unsigned PrefFunctionAlignment;
+
+  /// The preferred loop alignment.
+  unsigned PrefLoopAlignment;
+
+  /// Size in bits of the maximum atomics size the backend supports.
+  /// Accesses larger than this will be expanded by AtomicExpandPass.
+  unsigned MaxAtomicSizeInBitsSupported;
+
+  /// Size in bits of the minimum cmpxchg or ll/sc operation the
+  /// backend supports.
+  unsigned MinCmpXchgSizeInBits;
+
+  /// This indicates if the target supports unaligned atomic operations.
+  bool SupportsUnalignedAtomics;
+
+  /// If set to a physical register, this specifies the register that
+  /// llvm.stacksave/llvm.stackrestore should save and restore.
+  unsigned StackPointerRegisterToSaveRestore;
+
+  /// This indicates the default register class to use for each ValueType the
+  /// target supports natively.
+  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
+  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
+  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+
+  /// This indicates the "representative" register class to use for each
+  /// ValueType the target supports natively. This information is used by the
+  /// scheduler to track register pressure. By default, the representative
+  /// register class is the largest legal super-reg register class of the
+  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
+  /// representative class would be GR32.
+  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+
+  /// This indicates the "cost" of the "representative" register class for each
+  /// ValueType. The cost is used by the scheduler to approximate register
+  /// pressure.
+  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+
+  /// For any value types we are promoting or expanding, this contains the value
+  /// type that we are changing to.  For Expanded types, this contains one step
+  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
+  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
+  /// the same type (e.g. i32 -> i32).
+  MVT TransformToType[MVT::LAST_VALUETYPE];
+
+  /// For each operation and each value type, keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the operation.  Most
+  /// operations are Legal (aka, supported natively by the target), but
+  /// operations that are not should be described.  Note that operations on
+  /// non-legal value types are not described here.
+  LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
+
+  /// For each load extension type and each value type, keep a LegalizeAction
+  /// that indicates how instruction selection should deal with a load of a
+  /// specific value type and extension type. Uses 4 bits to store the action
+  /// for each of the 4 load ext types.
+  uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+
+  /// For each value type pair keep a LegalizeAction that indicates whether a
+  /// truncating store of a specific value type and truncating type is legal.
+  LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+
+  /// For each indexed mode and each value type, keep a pair of LegalizeAction
+  /// that indicates how instruction selection should deal with the load /
+  /// store.
+  ///
+  /// The first dimension is the value_type for the reference. The second
+  /// dimension represents the various modes for load store.
+  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
+
+  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the condition code.
+  ///
+  /// Because each CC action takes up 4 bits, we need to have the array size be
+  /// large enough to fit all of the value types. This can be done by rounding
+  /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
+  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
+
+protected:
+  ValueTypeActionImpl ValueTypeActions;
+
+private:
+  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
+
+  /// Targets can specify ISD nodes that they would like PerformDAGCombine
+  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
+  /// array.
+  unsigned char
+  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
+
+  /// For operations that must be promoted to a specific type, this holds the
+  /// destination type.  This map should be sparse, so don't hold it as an
+  /// array.
+  ///
+  /// Targets add entries to this map with AddPromotedToType(..), clients access
+  /// this with getTypeToPromoteTo(..).
+  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
+    PromoteToType;
+
+  /// Stores the name of each libcall.
+  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
+
+  /// The ISD::CondCode that should be used to test the result of each of the
+  /// comparison libcall against zero.
+  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
+
+  /// Stores the CallingConv that should be used for each libcall.
+  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
+
+  /// Set default libcall names and calling conventions.
+  void InitLibcalls(const Triple &TT);
+
+protected:
+  /// Return true if the extension represented by \p I is free.
+  /// \pre \p I is a sign, zero, or fp extension and
+  ///      is[Z|FP]ExtFree of the related types is not true.
+  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
+
+  /// Depth that GatherAllAliases should continue looking for chain
+  /// dependencies when trying to find a more preferable chain. As an
+  /// approximation, this should be more than the number of consecutive stores
+  /// expected to be merged.
+  unsigned GatherAllAliasesMaxDepth;
+
+  /// \brief Specify maximum number of store instructions per memset call.
+  ///
+  /// When lowering \@llvm.memset this field specifies the maximum number of
+  /// store operations that may be substituted for the call to memset. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memset will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
+  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
+  /// store.  This only applies to setting a constant array of a constant size.
+  unsigned MaxStoresPerMemset;
+
+  /// Maximum number of stores operations that may be substituted for the call
+  /// to memset, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemsetOptSize;
+
+  /// \brief Specify maximum number of store instructions per memcpy call.
+  ///
+  /// When lowering \@llvm.memcpy this field specifies the maximum number of
+  /// store operations that may be substituted for a call to memcpy. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memcpy will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
+  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
+  /// and one 1-byte store. This only applies to copying a constant array of
+  /// constant size.
+  unsigned MaxStoresPerMemcpy;
+
+  /// Maximum number of store operations that may be substituted for a call to
+  /// memcpy, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemcpyOptSize;
+  unsigned MaxLoadsPerMemcmp;
+  unsigned MaxLoadsPerMemcmpOptSize;
+
+  /// \brief Specify maximum number of store instructions per memmove call.
+  ///
+  /// When lowering \@llvm.memmove this field specifies the maximum number of
+  /// store instructions that may be substituted for a call to memmove. Targets
+  /// must set this value based on the cost threshold for that target. Targets
+  /// should assume that the memmove will be done using as many of the largest
+  /// store operations first, followed by smaller ones, if necessary, per
+  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
+  /// with 8-bit alignment would result in nine 1-byte stores.  This only
+  /// applies to copying a constant array of constant size.
+  unsigned MaxStoresPerMemmove;
+
+  /// Maximum number of store instructions that may be substituted for a call to
+  /// memmove, used for functions with OptSize attribute.
+  unsigned MaxStoresPerMemmoveOptSize;
+
+  /// Tells the code generator that select is more expensive than a branch if
+  /// the branch is usually predicted right.
+  bool PredictableSelectIsExpensive;
+
+  /// \see enableExtLdPromotion.
+  bool EnableExtLdPromotion;
+
+  /// Return true if the value types that can be represented by the specified
+  /// register class are all legal.
+  bool isLegalRC(const TargetRegisterInfo &TRI,
+                 const TargetRegisterClass &RC) const;
+
+  /// Replace/modify any TargetFrameIndex operands with a target-dependent
+  /// sequence of memory operands that is recognized by PrologEpilogInserter.
+  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
+                                    MachineBasicBlock *MBB) const;
+
+  /// Replace/modify the XRay custom event operands with target-dependent
+  /// details.
+  MachineBasicBlock *emitXRayCustomEvent(MachineInstr &MI,
+                                         MachineBasicBlock *MBB) const;
+};
+
+/// This class defines information used to lower LLVM code to legal SelectionDAG
+/// operators that the target instruction selector can accept natively.
+///
+/// This class also defines callbacks that targets must implement to lower
+/// target-specific constructs to SelectionDAG operators.
+class TargetLowering : public TargetLoweringBase {
+public:
+  struct DAGCombinerInfo;
+
+  TargetLowering(const TargetLowering &) = delete;
+  TargetLowering &operator=(const TargetLowering &) = delete;
+
+  /// NOTE: The TargetMachine owns TLOF.
+  explicit TargetLowering(const TargetMachine &TM);
+
+  bool isPositionIndependent() const;
+
+  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
+                                          FunctionLoweringInfo *FLI,
+                                          DivergenceAnalysis *DA) const {
+    return false;
+  }
+
+  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
+    return false;
+  }
+
+  /// Returns true (setting the base pointer, offset pointer, and addressing
+  /// mode by reference) if the node's address can be legally represented as a
+  /// pre-indexed load / store address.
+  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
+                                         SDValue &/*Offset*/,
+                                         ISD::MemIndexedMode &/*AM*/,
+                                         SelectionDAG &/*DAG*/) const {
+    return false;
+  }
+
+  /// Returns true (setting the base pointer, offset pointer, and addressing
+  /// mode by reference) if this node can be combined with a load / store to
+  /// form a post-indexed load / store.
+  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
+                                          SDValue &/*Base*/,
+                                          SDValue &/*Offset*/,
+                                          ISD::MemIndexedMode &/*AM*/,
+                                          SelectionDAG &/*DAG*/) const {
+    return false;
+  }
+
+  /// Return the entry encoding for a jump table in the current function.  The
+  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
+  virtual unsigned getJumpTableEncoding() const;
+
+  virtual const MCExpr *
+  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
+                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
+                            MCContext &/*Ctx*/) const {
+    llvm_unreachable("Need to implement this hook if target has custom JTIs");
+  }
+
+  /// Returns relocation base for the given PIC jumptable.
+  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
+                                           SelectionDAG &DAG) const;
+
+  /// This returns the relocation base for the given PIC jumptable, the same as
+  /// getPICJumpTableRelocBase, but as an MCExpr.
+  virtual const MCExpr *
+  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
+                               unsigned JTI, MCContext &Ctx) const;
+
+  /// Return true if folding a constant offset with the given GlobalAddress is
+  /// legal.  It is frequently not legal in PIC relocation models.
+  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+
+  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
+                            SDValue &Chain) const;
+
+  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
+                           SDValue &NewRHS, ISD::CondCode &CCCode,
+                           const SDLoc &DL) const;
+
+  /// Returns a pair of (return value, chain).
+  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
+  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
+                                          EVT RetVT, ArrayRef<SDValue> Ops,
+                                          bool isSigned, const SDLoc &dl,
+                                          bool doesNotReturn = false,
+                                          bool isReturnValueUsed = true) const;
+
+  /// Check whether parameters to a call that are passed in callee saved
+  /// registers are the same as from the calling function.  This needs to be
+  /// checked for tail call eligibility.
+  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
+      const uint32_t *CallerPreservedMask,
+      const SmallVectorImpl<CCValAssign> &ArgLocs,
+      const SmallVectorImpl<SDValue> &OutVals) const;
+
+  //===--------------------------------------------------------------------===//
+  // TargetLowering Optimization Methods
+  //
+
+  /// A convenience struct that encapsulates a DAG, and two SDValues for
+  /// returning information from TargetLowering to its clients that want to
+  /// combine.
+  struct TargetLoweringOpt {
+    SelectionDAG &DAG;
+    bool LegalTys;
+    bool LegalOps;
+    SDValue Old;
+    SDValue New;
+
+    explicit TargetLoweringOpt(SelectionDAG &InDAG,
+                               bool LT, bool LO) :
+      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
+
+    bool LegalTypes() const { return LegalTys; }
+    bool LegalOperations() const { return LegalOps; }
+
+    bool CombineTo(SDValue O, SDValue N) {
+      Old = O;
+      New = N;
+      return true;
+    }
+  };
+
+  /// Check to see if the specified operand of the specified instruction is a
+  /// constant integer.  If so, check to see if there are any bits set in the
+  /// constant that are not demanded.  If so, shrink the constant and return
+  /// true.
+  bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                              TargetLoweringOpt &TLO) const;
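+
+  // Illustrative example (not from the original header): if Op is
+  // (and X, 0xFFFF) but Demanded is 0x00FF, the constant can be shrunk to
+  // 0xFF, i.e. the node becomes (and X, 0xFF) and this returns true.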
+
+  /// Target hook to do target-specific constant optimization, which is called
+  /// by ShrinkDemandedConstant. This function should return true if the target
+  /// doesn't want ShrinkDemandedConstant to further optimize the constant.
+  virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
+                                            TargetLoweringOpt &TLO) const {
+    return false;
+  }
+
+  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
+  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
+  /// generalized for targets with other types of implicit widening casts.
+  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
+                        TargetLoweringOpt &TLO) const;
+
+  /// Helper for SimplifyDemandedBits that can simplify an operation with
+  /// multiple uses.  This function simplifies operand \p OpIdx of \p User and
+  /// then updates \p User with the simplified version. No other uses of
+  /// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
+  /// function behaves exactly like function SimplifyDemandedBits declared
+  /// below except that it also updates the DAG by calling
+  /// DCI.CommitTargetLoweringOpt.
+  bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
+                            DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
+
+  /// Look at Op.  At this point, we know that only the DemandedMask bits of the
+  /// result of Op are ever used downstream.  If we can use this information to
+  /// simplify Op, create a new simplified DAG node and return true, returning
+  /// the original and new nodes in Old and New.  Otherwise, analyze the
+  /// expression and return a mask of KnownOne and KnownZero bits for the
+  /// expression (used to simplify the caller).  The KnownZero/One bits may only
+  /// be accurate for those bits in the DemandedMask.
+  /// \p AssumeSingleUse When this parameter is true, this function will
+  ///    attempt to simplify \p Op even if there are multiple uses.
+  ///    Callers are responsible for correctly updating the DAG based on the
+  ///    results of this function, because simply replacing TLO.Old
+  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
+  ///    has multiple uses.
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+                            KnownBits &Known,
+                            TargetLoweringOpt &TLO,
+                            unsigned Depth = 0,
+                            bool AssumeSingleUse = false) const;
+
+  /// Helper wrapper around SimplifyDemandedBits.
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+                            DAGCombinerInfo &DCI) const;
+
+  /// Look at Vector Op. At this point, we know that only the DemandedElts
+  /// elements of the result of Op are ever used downstream.  If we can use
+  /// this information to simplify Op, create a new simplified DAG node and
+  /// return true, storing the original and new nodes in TLO.
+  /// Otherwise, analyze the expression and return a mask of KnownUndef and
+  /// KnownZero elements for the expression (used to simplify the caller).
+  /// The KnownUndef/Zero elements may only be accurate for the elements
+  /// requested in DemandedElts.
+  /// \p AssumeSingleUse When this parameter is true, this function will
+  ///    attempt to simplify \p Op even if there are multiple uses.
+  ///    Callers are responsible for correctly updating the DAG based on the
+  ///    results of this function, because simply replacing TLO.Old
+  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
+  ///    has multiple uses.
+  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
+                                  APInt &KnownUndef, APInt &KnownZero,
+                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
+                                  bool AssumeSingleUse = false) const;
+
+  /// Helper wrapper around SimplifyDemandedVectorElts.
+  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
+                                  APInt &KnownUndef, APInt &KnownZero,
+                                  DAGCombinerInfo &DCI) const;
+
+  /// Determine which of the bits specified in Mask are known to be either zero
+  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
+  /// argument allows us to only collect the known bits that are shared by the
+  /// requested vector elements.
+  virtual void computeKnownBitsForTargetNode(const SDValue Op,
+                                             KnownBits &Known,
+                                             const APInt &DemandedElts,
+                                             const SelectionDAG &DAG,
+                                             unsigned Depth = 0) const;
+
+  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
+  /// Default implementation computes low bits based on alignment
+  /// information. This should preserve known bits passed into it.
+  virtual void computeKnownBitsForFrameIndex(const SDValue FIOp,
+                                             KnownBits &Known,
+                                             const APInt &DemandedElts,
+                                             const SelectionDAG &DAG,
+                                             unsigned Depth = 0) const;
+
+  /// This method can be implemented by targets that want to expose additional
+  /// information about sign bits to the DAG Combiner. The DemandedElts
+  /// argument allows us to only collect the minimum sign bits that are shared
+  /// by the requested vector elements.
+  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+                                                   const APInt &DemandedElts,
+                                                   const SelectionDAG &DAG,
+                                                   unsigned Depth = 0) const;
+
+  /// Attempt to simplify any target nodes based on the demanded vector
+  /// elements, returning true on success. Otherwise, analyze the expression and
+  /// return a mask of KnownUndef and KnownZero elements for the expression
+  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
+  /// accurate for the elements requested in DemandedElts.
+  virtual bool SimplifyDemandedVectorEltsForTargetNode(
+      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
+      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
+
+  struct DAGCombinerInfo {
+    void *DC;  // The DAG Combiner object.
+    CombineLevel Level;
+    bool CalledByLegalizer;
+
+  public:
+    SelectionDAG &DAG;
+
+    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
+      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
+
+    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
+    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
+    bool isAfterLegalizeDAG() const {
+      return Level == AfterLegalizeDAG;
+    }
+    CombineLevel getDAGCombineLevel() { return Level; }
+    bool isCalledByLegalizer() const { return CalledByLegalizer; }
+
+    void AddToWorklist(SDNode *N);
+    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
+    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
+    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
+
+    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
+  };
+
+  /// Return true if N is a constant or constant vector equal to the true
+  /// value from getBooleanContents().
+  bool isConstTrueVal(const SDNode *N) const;
+
+  /// Return true if N is a constant or constant vector equal to the false
+  /// value from getBooleanContents().
+  bool isConstFalseVal(const SDNode *N) const;
+
+  /// Return if \p N is a True value when extended to \p VT.
+  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
+
+  /// Try to simplify a setcc built with the specified operands and cc. If it is
+  /// unable to simplify it, return a null SDValue.
+  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
+                        bool foldBooleans, DAGCombinerInfo &DCI,
+                        const SDLoc &dl) const;
+
+  /// For targets that wrap an address, unwrap it for analysis.
+  virtual SDValue unwrapAddress(SDValue N) const { return N; }
+
+  /// Returns true (and the GlobalValue and the offset) if the node is a
+  /// GlobalAddress + offset.
+  virtual bool
+  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+
+  /// This method will be invoked for all target nodes and for any
+  /// target-independent nodes that the target has registered (via
+  /// setTargetDAGCombine) to invoke it for.
+  ///
+  /// The semantics are as follows:
+  /// Return Value:
+  ///   SDValue.Val == 0   - No change was made
+  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
+  ///   otherwise          - N should be replaced by the returned Operand.
+  ///
+  /// In addition, methods provided by DAGCombinerInfo may be used to perform
+  /// more complex transformations.
+  ///
+  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
+  /// Return true if it is profitable to move a following shift through this
+  /// node, adjusting any immediate operands as necessary to preserve
+  /// semantics. This transformation may not be desirable if it disrupts a
+  /// particularly auspicious target-specific tree (e.g. bitfield extraction
+  /// in AArch64). By default, it returns true.
+  virtual bool isDesirableToCommuteWithShift(const SDNode *N) const {
+    return true;
+  }
+
+  /// Return true if it is profitable to combine a BUILD_VECTOR with a
+  /// stride-pattern to a shuffle and a truncate.
+  /// Example of such a combine:
+  /// v4i32 build_vector((extract_elt V, 1),
+  ///                    (extract_elt V, 3),
+  ///                    (extract_elt V, 5),
+  ///                    (extract_elt V, 7))
+  ///  -->
+  /// v4i32 truncate (bitcast (shuffle<1,u,3,u,5,u,7,u> V, u) to v4i64)
+  virtual bool isDesirableToCombineBuildVectorToShuffleTruncate(
+      ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
+    return false;
+  }
+
+  /// Return true if the target has native support for the specified value type
+  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
+  /// i16 is legal, but undesirable since i16 instruction encodings are longer
+  /// and some i16 instructions are slow.
+  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
+    // By default, assume all legal types are desirable.
+    return isTypeLegal(VT);
+  }
+
+  /// Return true if it is profitable for the dag combiner to transform a
+  /// floating point op of the specified opcode to an equivalent op of an
+  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
+  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
+                                                 EVT /*VT*/) const {
+    return false;
+  }
+
+  /// This method queries the target whether it is beneficial for the dag
+  /// combiner to promote the specified node. If true, it should return the
+  /// desired promotion type by reference.
+  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
+    return false;
+  }
+
+  /// Return true if the target supports swifterror attribute. It optimizes
+  /// loads and stores to reading and writing a specific register.
+  virtual bool supportSwiftError() const {
+    return false;
+  }
+
+  /// Return true if the target supports that a subset of CSRs for the given
+  /// machine function is handled explicitly via copies.
+  virtual bool supportSplitCSR(MachineFunction *MF) const {
+    return false;
+  }
+
+  /// Perform necessary initialization to handle a subset of CSRs explicitly
+  /// via copies. This function is called at the beginning of instruction
+  /// selection.
+  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Insert explicit copies in entry and exit blocks. We copy a subset of
+  /// CSRs to virtual registers in the entry block, and copy them back to
+  /// physical registers in the exit blocks. This function is called at the end
+  /// of instruction selection.
+  virtual void insertCopiesSplitCSR(
+      MachineBasicBlock *Entry,
+      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  //===--------------------------------------------------------------------===//
+  // Lowering methods - These methods must be implemented by targets so that
+  // the SelectionDAGBuilder code knows how to lower these.
+  //
+
+  /// This hook must be implemented to lower the incoming (formal) arguments,
+  /// described by the Ins array, into the specified DAG. The implementation
+  /// should fill in the InVals array with legal-type argument values, and
+  /// return the resulting token chain value.
+  virtual SDValue LowerFormalArguments(
+      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
+      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
+      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// This structure contains all information that is necessary for lowering
+  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
+  /// needs to lower a call, and targets will see this struct in their LowerCall
+  /// implementation.
+  struct CallLoweringInfo {
+    SDValue Chain;
+    Type *RetTy = nullptr;
+    bool RetSExt           : 1;
+    bool RetZExt           : 1;
+    bool IsVarArg          : 1;
+    bool IsInReg           : 1;
+    bool DoesNotReturn     : 1;
+    bool IsReturnValueUsed : 1;
+    bool IsConvergent      : 1;
+    bool IsPatchPoint      : 1;
+
+    // IsTailCall should be modified by implementations of
+    // TargetLowering::LowerCall that perform tail call conversions.
+    bool IsTailCall = false;
+
+    // True if call lowering is done after SelectionDAG type legalization.
+    bool IsPostTypeLegalization = false;
+
+    unsigned NumFixedArgs = -1;
+    CallingConv::ID CallConv = CallingConv::C;
+    SDValue Callee;
+    ArgListTy Args;
+    SelectionDAG &DAG;
+    SDLoc DL;
+    ImmutableCallSite CS;
+    SmallVector<ISD::OutputArg, 32> Outs;
+    SmallVector<SDValue, 32> OutVals;
+    SmallVector<ISD::InputArg, 32> Ins;
+    SmallVector<SDValue, 4> InVals;
+
+    CallLoweringInfo(SelectionDAG &DAG)
+        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
+          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
+          IsPatchPoint(false), DAG(DAG) {}
+
+    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
+      DL = dl;
+      return *this;
+    }
+
+    CallLoweringInfo &setChain(SDValue InChain) {
+      Chain = InChain;
+      return *this;
+    }
+
+    // setCallee with target/module-specific attributes
+    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
+                                   SDValue Target, ArgListTy &&ArgsList) {
+      RetTy = ResultType;
+      Callee = Target;
+      CallConv = CC;
+      NumFixedArgs = ArgsList.size();
+      Args = std::move(ArgsList);
+
+      DAG.getTargetLoweringInfo().markLibCallAttributes(
+          &(DAG.getMachineFunction()), CC, Args);
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
+                                SDValue Target, ArgListTy &&ArgsList) {
+      RetTy = ResultType;
+      Callee = Target;
+      CallConv = CC;
+      NumFixedArgs = ArgsList.size();
+      Args = std::move(ArgsList);
+      return *this;
+    }
+
+    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
+                                SDValue Target, ArgListTy &&ArgsList,
+                                ImmutableCallSite Call) {
+      RetTy = ResultType;
+
+      IsInReg = Call.hasRetAttr(Attribute::InReg);
+      DoesNotReturn =
+          Call.doesNotReturn() ||
+          (!Call.isInvoke() &&
+           isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
+      IsVarArg = FTy->isVarArg();
+      IsReturnValueUsed = !Call.getInstruction()->use_empty();
+      RetSExt = Call.hasRetAttr(Attribute::SExt);
+      RetZExt = Call.hasRetAttr(Attribute::ZExt);
+
+      Callee = Target;
+
+      CallConv = Call.getCallingConv();
+      NumFixedArgs = FTy->getNumParams();
+      Args = std::move(ArgsList);
+
+      CS = Call;
+
+      return *this;
+    }
+
+    CallLoweringInfo &setInRegister(bool Value = true) {
+      IsInReg = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setNoReturn(bool Value = true) {
+      DoesNotReturn = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setVarArg(bool Value = true) {
+      IsVarArg = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setTailCall(bool Value = true) {
+      IsTailCall = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setDiscardResult(bool Value = true) {
+      IsReturnValueUsed = !Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setConvergent(bool Value = true) {
+      IsConvergent = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setSExtResult(bool Value = true) {
+      RetSExt = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setZExtResult(bool Value = true) {
+      RetZExt = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+      IsPatchPoint = Value;
+      return *this;
+    }
+
+    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
+      IsPostTypeLegalization = Value;
+      return *this;
+    }
+
+    ArgListTy &getArgs() {
+      return Args;
+    }
+  };
+
+  /// This function lowers an abstract call to a function into an actual call.
+  /// This returns a pair of operands.  The first element is the return value
+  /// for the function (if RetTy is not VoidTy).  The second element is the
+  /// outgoing token chain. It calls LowerCall to do the actual lowering.
+  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
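+
+  // Illustrative usage sketch (not from the original header); Chain, Callee,
+  // RetTy, Args, and dl stand in for values the caller already has:
+  //   TargetLowering::CallLoweringInfo CLI(DAG);
+  //   CLI.setDebugLoc(dl)
+  //       .setChain(Chain)
+  //       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
+  //       .setNoReturn(true);
+  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);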
+
+  /// This hook must be implemented to lower calls into the specified
+  /// DAG. The outgoing arguments to the call are described by the Outs array,
+  /// and the values to be returned by the call are described by the Ins
+  /// array. The implementation should fill in the InVals array with legal-type
+  /// return values from the call, and return the resulting token chain value.
+  virtual SDValue
+    LowerCall(CallLoweringInfo &/*CLI*/,
+              SmallVectorImpl<SDValue> &/*InVals*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Target-specific cleanup for formal ByVal parameters.
+  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
+
+  /// This hook should be implemented to check whether the return values
+  /// described by the Outs array can fit into the return registers.  If false
+  /// is returned, an sret-demotion is performed.
+  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
+                              MachineFunction &/*MF*/, bool /*isVarArg*/,
+               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
+               LLVMContext &/*Context*/) const
+  {
+    // Return true by default to get preexisting behavior.
+    return true;
+  }
+
+  /// This hook must be implemented to lower outgoing return values, described
+  /// by the Outs array, into the specified DAG. The implementation should
+  /// return the resulting token chain value.
+  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+                              bool /*isVarArg*/,
+                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
+                              const SmallVectorImpl<SDValue> & /*OutVals*/,
+                              const SDLoc & /*dl*/,
+                              SelectionDAG & /*DAG*/) const {
+    llvm_unreachable("Not Implemented");
+  }
+
+  /// Return true if the result of the specified node is used by a return node
+  /// only. It also computes and returns the input chain for the tail call.
+  ///
+  /// This is used to determine whether it is possible to codegen a libcall as
+  /// tail call at legalization time.
+  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
+    return false;
+  }
+
+  /// Return true if the target may be able to emit the call instruction as a
+  /// tail call. This is used by optimization passes to determine if it's
+  /// profitable to duplicate return instructions to enable tailcall
+  /// optimization.
+  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
+    return false;
+  }
+
+  /// Return the builtin name for the __builtin___clear_cache intrinsic.
+  /// The default is to invoke the clear cache library call.
+  virtual const char *getClearCacheBuiltinName() const {
+    return "__clear_cache";
+  }
+
+  /// Return the register ID of the name passed in. Used by named register
+  /// global variables extension. There is no target-independent behaviour
+  /// so the default action is to bail.
+  virtual unsigned getRegisterByName(const char* RegName, EVT VT,
+                                     SelectionDAG &DAG) const {
+    report_fatal_error("Named registers not implemented for this target");
+  }
+
+  /// Return the type that should be used to zero or sign extend a
+  /// zeroext/signext integer return value.  FIXME: Some C calling conventions
+  /// require the return type to be promoted, but this is not true all the time,
+  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
+  /// conventions. The frontend should handle this and include all of the
+  /// necessary information.
+  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+                                       ISD::NodeType /*ExtendKind*/) const {
+    EVT MinVT = getRegisterType(Context, MVT::i32);
+    return VT.bitsLT(MinVT) ? MinVT : VT;
+  }
+
+  /// For some targets, an LLVM struct type must be broken down into multiple
+  /// simple types, but the calling convention specifies that the entire struct
+  /// must be passed in a block of consecutive registers.
+  virtual bool
+  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
+                                            bool isVarArg) const {
+    return false;
+  }
+
+  /// Returns a 0-terminated array of registers that can be safely used as
+  /// scratch registers.
+  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
+    return nullptr;
+  }
+
+  /// This callback is used to prepare for a volatile or atomic load.
+  /// It takes a chain node as input and returns the chain for the load itself.
+  ///
+  /// Having a callback like this is necessary for targets like SystemZ,
+  /// which allows a CPU to reuse the result of a previous load indefinitely,
+  /// even if a cache-coherent store is performed by another CPU.  The default
+  /// implementation does nothing.
+  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
+                                              SelectionDAG &DAG) const {
+    return Chain;
+  }
+
+  /// This callback is used to inspect load/store instructions and add
+  /// target-specific MachineMemOperand flags to them.  The default
+  /// implementation does nothing.
+  virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
+    return MachineMemOperand::MONone;
+  }
+
+  /// This callback is invoked by the type legalizer to legalize nodes with an
+  /// illegal operand type but legal result types.  It replaces the
+  /// LowerOperation callback in the type Legalizer.  The reason we can not do
+  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
+  /// use this callback.
+  ///
+  /// TODO: Consider merging with ReplaceNodeResults.
+  ///
+  /// The target places new result values for the node in Results (their number
+  /// and types must exactly match those of the original return values of
+  /// the node), or leaves Results empty, which indicates that the node is not
+  /// to be custom lowered after all.
+  /// The default implementation calls LowerOperation.
+  virtual void LowerOperationWrapper(SDNode *N,
+                                     SmallVectorImpl<SDValue> &Results,
+                                     SelectionDAG &DAG) const;
+
+  /// This callback is invoked for operations that are unsupported by the
+  /// target, which are registered to use 'custom' lowering, and whose defined
+  /// values are all legal.  If the target has no operations that require custom
+  /// lowering, it need not implement this.  The default implementation of this
+  /// aborts.
+  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+  /// This callback is invoked when a node result type is illegal for the
+  /// target, and the operation was registered to use 'custom' lowering for that
+  /// result type.  The target places new result values for the node in Results
+  /// (their number and types must exactly match those of the original return
+  /// values of the node), or leaves Results empty, which indicates that the
+  /// node is not to be custom lowered after all.
+  ///
+  /// If the target has no operations that require custom lowering, it need not
+  /// implement this.  The default implementation aborts.
+  virtual void ReplaceNodeResults(SDNode * /*N*/,
+                                  SmallVectorImpl<SDValue> &/*Results*/,
+                                  SelectionDAG &/*DAG*/) const {
+    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
+  }
+
+  /// This method returns the name of a target specific DAG node.
+  virtual const char *getTargetNodeName(unsigned Opcode) const;
+
+  /// This method returns a target specific FastISel object, or null if the
+  /// target does not support "fast" ISel.
+  virtual FastISel *createFastISel(FunctionLoweringInfo &,
+                                   const TargetLibraryInfo *) const {
+    return nullptr;
+  }
+
+  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
+                                             SelectionDAG &DAG) const;
+
+  //===--------------------------------------------------------------------===//
+  // Inline Asm Support hooks
+  //
+
+  /// This hook allows the target to expand an inline asm call to be explicit
+  /// llvm code if it wants to.  This is useful for turning simple inline asms
+  /// into LLVM intrinsics, which gives the compiler more information about the
+  /// behavior of the code.
+  virtual bool ExpandInlineAsm(CallInst *) const {
+    return false;
+  }
+
+  enum ConstraintType {
+    C_Register,            // Constraint represents specific register(s).
+    C_RegisterClass,       // Constraint represents any of register(s) in class.
+    C_Memory,              // Memory constraint.
+    C_Other,               // Something else.
+    C_Unknown              // Unsupported constraint.
+  };
+
+  enum ConstraintWeight {
+    // Generic weights.
+    CW_Invalid  = -1,     // No match.
+    CW_Okay     = 0,      // Acceptable.
+    CW_Good     = 1,      // Good weight.
+    CW_Better   = 2,      // Better weight.
+    CW_Best     = 3,      // Best weight.
+
+    // Well-known weights.
+    CW_SpecificReg  = CW_Okay,    // Specific register operands.
+    CW_Register     = CW_Good,    // Register operands.
+    CW_Memory       = CW_Better,  // Memory operands.
+    CW_Constant     = CW_Best,    // Constant operand.
+    CW_Default      = CW_Okay     // Default or don't know type.
+  };
+
+  /// This contains information for each constraint that we are lowering.
+  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
+    /// This contains the actual string for the code, like "m".  TargetLowering
+    /// picks the 'best' code from ConstraintInfo::Codes that most closely
+    /// matches the operand.
+    std::string ConstraintCode;
+
+    /// Information about the constraint code, e.g. Register, RegisterClass,
+    /// Memory, Other, Unknown.
+    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
+
+    /// If this is the result output operand or a clobber, this is null,
+    /// otherwise it is the incoming operand to the CallInst.  This gets
+    /// modified as the asm is processed.
+    Value *CallOperandVal = nullptr;
+
+    /// The ValueType for the operand value.
+    MVT ConstraintVT = MVT::Other;
+
+    /// Copy constructor for copying from a ConstraintInfo.
+    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
+        : InlineAsm::ConstraintInfo(std::move(Info)) {}
+
+  /// Return true if this is an input operand that is a matching constraint
+    /// like "4".
+    bool isMatchingInputConstraint() const;
+
+    /// If this is an input matching constraint, this method returns the output
+    /// operand it matches.
+    unsigned getMatchedOperand() const;
+  };
+
+  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
+
+  /// Split up the constraint string from the inline assembly value into the
+  /// specific constraints and their prefixes, and also tie in the associated
+  /// operand values.  If this returns an empty vector, and if the constraint
+  /// string itself isn't empty, there was an error parsing.
+  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
+                                                const TargetRegisterInfo *TRI,
+                                                ImmutableCallSite CS) const;
+
+  /// Examine constraint type and operand type and determine a weight value.
+  /// The operand object must already have been set up with the operand type.
+  virtual ConstraintWeight getMultipleConstraintMatchWeight(
+      AsmOperandInfo &info, int maIndex) const;
+
+  /// Examine constraint string and operand type and determine a weight value.
+  /// The operand object must already have been set up with the operand type.
+  virtual ConstraintWeight getSingleConstraintMatchWeight(
+      AsmOperandInfo &info, const char *constraint) const;
+
+  /// Determines the constraint code and constraint type to use for the specific
+  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
+  /// If the actual operand being passed in is available, it can be passed in as
+  /// Op, otherwise an empty SDValue can be passed.
+  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
+                                      SDValue Op,
+                                      SelectionDAG *DAG = nullptr) const;
+
+  /// Given a constraint, return the type of constraint it is for this target.
+  virtual ConstraintType getConstraintType(StringRef Constraint) const;
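+
+  // Illustrative sketch (not from the original header): a target override
+  // typically special-cases its own single-letter codes and defers the rest
+  // to the base class ('Q' here is a hypothetical memory constraint):
+  //   ConstraintType getConstraintType(StringRef Constraint) const override {
+  //     if (Constraint.size() == 1 && Constraint[0] == 'Q')
+  //       return C_Memory;
+  //     return TargetLowering::getConstraintType(Constraint);
+  //   }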
+
+  /// Given a physical register constraint (e.g.  {edx}), return the register
+  /// number and the register class for the register.
+  ///
+  /// Given a register class constraint, like 'r', if this corresponds directly
+  /// to an LLVM register class, return a register of 0 and the register class
+  /// pointer.
+  ///
+  /// This should only be used for C_Register constraints.  On error, this
+  /// returns a register number of 0 and a null register class pointer.
+  virtual std::pair<unsigned, const TargetRegisterClass *>
+  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                               StringRef Constraint, MVT VT) const;
+
+  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+    if (ConstraintCode == "i")
+      return InlineAsm::Constraint_i;
+    else if (ConstraintCode == "m")
+      return InlineAsm::Constraint_m;
+    return InlineAsm::Constraint_Unknown;
+  }
+
+  /// Try to replace an X constraint, which matches anything, with another that
+  /// has more specific requirements based on the type of the corresponding
+  /// operand.  This returns null if there is no replacement to make.
+  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+
+  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
+  /// add anything to Ops.
+  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+                                            std::vector<SDValue> &Ops,
+                                            SelectionDAG &DAG) const;
+
+  //===--------------------------------------------------------------------===//
+  // Div utility functions
+  //
+  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                    bool IsAfterLegalization,
+                    std::vector<SDNode *> *Created) const;
+  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+                    bool IsAfterLegalization,
+                    std::vector<SDNode *> *Created) const;
+
+  /// Targets may override this function to provide custom SDIV lowering for
+  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
+  /// assumes SDIV is expensive and replaces it with a series of other integer
+  /// operations.
+  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+                                SelectionDAG &DAG,
+                                std::vector<SDNode *> *Created) const;
+
+  /// Indicate whether this target prefers to combine FDIVs with the same
+  /// divisor. If the transform should never be done, return zero. If the
+  /// transform should be done, return the minimum number of divisor uses
+  /// that must exist.
+  virtual unsigned combineRepeatedFPDivisors() const {
+    return 0;
+  }
+
+  /// Hooks for building estimates in place of slower divisions and square
+  /// roots.
+
+  /// Return either a square root or its reciprocal estimate value for the input
+  /// operand.
+  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+  /// 'Enabled' as set by a potential default override attribute.
+  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+  /// refinement iterations required to generate a sufficient (though not
+  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
+  /// algorithm implementation that uses either one or two constants.
+  /// The boolean Reciprocal is used to select whether the estimate is for the
+  /// square root of the input operand or the reciprocal of its square root.
+  /// A target may choose to implement its own refinement within this function.
+  /// If that's true, then return '0' as the number of RefinementSteps to avoid
+  /// any further refinement of the estimate.
+  /// An empty SDValue return means no estimate sequence can be created.
+  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+                                  int Enabled, int &RefinementSteps,
+                                  bool &UseOneConstNR, bool Reciprocal) const {
+    return SDValue();
+  }
+
+  /// Return a reciprocal estimate value for the input operand.
+  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
+  /// 'Enabled' as set by a potential default override attribute.
+  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
+  /// refinement iterations required to generate a sufficient (though not
+  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
+  /// A target may choose to implement its own refinement within this function.
+  /// If that's true, then return '0' as the number of RefinementSteps to avoid
+  /// any further refinement of the estimate.
+  /// An empty SDValue return means no estimate sequence can be created.
+  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
+                                   int Enabled, int &RefinementSteps) const {
+    return SDValue();
+  }
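+
+  // Illustrative sketch under stated assumptions (MYISD::FRECIPE is a
+  // hypothetical target node): a target with a hardware reciprocal-estimate
+  // instruction could implement the hook as
+  //
+  //   SDValue MyTargetLowering::getRecipEstimate(SDValue Operand,
+  //                                              SelectionDAG &DAG,
+  //                                              int Enabled,
+  //                                              int &RefinementSteps) const {
+  //     if (Operand.getValueType() != MVT::f32)
+  //       return SDValue();   // no estimate sequence for other types
+  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
+  //       RefinementSteps = 1;  // ask for one Newton-Raphson step
+  //     return DAG.getNode(MYISD::FRECIPE, SDLoc(Operand), MVT::f32, Operand);
+  //   }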
+
+  //===--------------------------------------------------------------------===//
+  // Legalization utility functions
+  //
+
+  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
+  /// respectively, each computing an n/2-bit part of the result.
+  /// \param Result A vector that will be filled with the parts of the result
+  ///        in little-endian order.
+  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
+  ///        if you want to control how low bits are extracted from the LHS.
+  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
+  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
+  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
+  /// \returns true if the node has been expanded, false if it has not
+  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
+                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
+                      SelectionDAG &DAG, MulExpansionKind Kind,
+                      SDValue LL = SDValue(), SDValue LH = SDValue(),
+                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;
+
+  /// Expand a MUL into two nodes.  One that computes the high bits of
+  /// the result and one that computes the low bits.
+  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
+  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
+  ///        if you want to control how low bits are extracted from the LHS.
+  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
+  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
+  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
+  /// \returns true if the node has been expanded, false if it has not
+  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+                 SelectionDAG &DAG, MulExpansionKind Kind,
+                 SDValue LL = SDValue(), SDValue LH = SDValue(),
+                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
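+
+  // Illustrative use (hypothetical target code): when a wide multiply is not
+  // legal, a target's custom lowering can call expandMUL and repackage the
+  // halves:
+  //
+  //   SDValue Lo, Hi;
+  //   if (expandMUL(N, Lo, Hi, MVT::i32, DAG,
+  //                 MulExpansionKind::OnlyLegalOrCustom))
+  //     return DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi);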
+
+  /// Expand an FP_TO_SINT conversion (e.g. f32 to i64).
+  /// \param N Node to expand
+  /// \param Result output after conversion
+  /// \returns true if the expansion was successful, false otherwise
+  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+  /// Turn load of vector type into a load of the individual elements.
+  /// \param LD load to expand
+  /// \returns MERGE_VALUES of the scalar loads with their chains.
+  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
+
+  /// Turn a store of a vector type into stores of the individual elements.
+  /// \param ST Store with a vector value type
+  /// \returns MERGE_VALUES of the individual store chains.
+  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
+
+  /// Expands an unaligned load to 2 half-size loads for an integer, and
+  /// possibly more for vectors.
+  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
+                                                  SelectionDAG &DAG) const;
+
+  /// Expands an unaligned store to 2 half-size stores for integer values, and
+  /// possibly more for vectors.
+  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
+
+  /// Increments memory address \p Addr according to the type of the value
+  /// \p DataVT that should be stored. If the data is stored in compressed
+  /// form, the memory address should be incremented according to the number of
+  /// the stored elements. This number is equal to the number of set bits
+  /// in \p Mask.
+  /// \p DataVT is a vector type. \p Mask is a vector value.
+  /// \p DataVT and \p Mask have the same number of vector elements.
+  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
+                                 EVT DataVT, SelectionDAG &DAG,
+                                 bool IsCompressedMemory) const;
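+
+  // In effect, for compressed memory the increment computed is
+  //   NewAddr = Addr + countPopulation(Mask) * (element size in bytes),
+  // while in the normal case it is simply Addr plus DataVT's store size.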
+
+  /// Get a pointer to vector element \p Idx located in memory for a vector of
+  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
+  /// bounds, the returned pointer is unspecified, but will be within the vector
+  /// bounds.
+  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
+                                  SDValue Idx) const;
+
+  //===--------------------------------------------------------------------===//
+  // Instruction Emitting Hooks
+  //
+
+  /// This method should be implemented by targets that mark instructions with
+  /// the 'usesCustomInserter' flag.  These instructions are special in various
+  /// ways, which require special support to insert.  The specified MachineInstr
+  /// is created but not inserted into any basic blocks, and this method is
+  /// called to expand it into a sequence of instructions, potentially also
+  /// creating new basic blocks and control flow.
+  /// As long as the returned basic block is different (i.e., we created a new
+  /// one), the custom inserter is free to modify the rest of \p MBB.
+  virtual MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
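+
+  // Illustrative sketch (MyTarget::SELECT_PSEUDO and emitSelect are
+  // hypothetical): overrides usually dispatch on the opcode and expand each
+  // pseudo, possibly splitting \p MBB and creating a control-flow diamond:
+  //
+  //   MachineBasicBlock *MyTargetLowering::EmitInstrWithCustomInserter(
+  //       MachineInstr &MI, MachineBasicBlock *MBB) const {
+  //     switch (MI.getOpcode()) {
+  //     case MyTarget::SELECT_PSEUDO:
+  //       return emitSelect(MI, MBB); // builds new blocks, returns the tail
+  //     default:
+  //       llvm_unreachable("unexpected instr type to insert");
+  //     }
+  //   }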
+
+  /// This method should be implemented by targets that mark instructions with
+  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
+  /// instruction selection by target hooks.  e.g. To fill in optional defs for
+  /// ARM 's' setting instructions.
+  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
+                                             SDNode *Node) const;
+
+  /// If this function returns true, SelectionDAGBuilder emits a
+  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
+  virtual bool useLoadStackGuardNode() const {
+    return false;
+  }
+
+  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
+                                      const SDLoc &DL) const {
+    llvm_unreachable("not implemented for this target");
+  }
+
+  /// Lower TLS global address SDNode for target independent emulated TLS model.
+  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
+                                          SelectionDAG &DAG) const;
+
+  /// Expands a target-specific indirect branch for the case of JumpTable
+  /// expansion.
+  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
+                                         SDValue Addr,
+                                         SelectionDAG &DAG) const {
+    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
+  }
+
+  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
+  // If we're comparing for equality to zero and isCtlzFast is true, expose the
+  // fact that this can be implemented as a ctlz/srl pair, so that the dag
+  // combiner can fold the new nodes.
+  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
+
+private:
+  SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
+                               ISD::CondCode Cond, DAGCombinerInfo &DCI,
+                               const SDLoc &DL) const;
+};
+
+/// Given an LLVM IR type and return type attributes, compute the return value
+/// EVTs and flags, and optionally also the offsets, if the return value is
+/// being lowered to memory.
+void GetReturnInfo(Type *ReturnType, AttributeList attr,
+                   SmallVectorImpl<ISD::OutputArg> &Outs,
+                   const TargetLowering &TLI, const DataLayout &DL);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
new file mode 100644
index 0000000..78da77f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -0,0 +1,200 @@
+//==- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes used to handle lowerings specific to common
+// object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+namespace llvm {
+
+class GlobalValue;
+class MachineModuleInfo;
+class Mangler;
+class MCContext;
+class MCSection;
+class MCSymbol;
+class TargetMachine;
+
+class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
+  bool UseInitArray = false;
+  mutable unsigned NextUniqueID = 1;  // ID 0 is reserved for execute-only sections
+
+protected:
+  MCSymbolRefExpr::VariantKind PLTRelativeVariantKind =
+      MCSymbolRefExpr::VK_None;
+
+public:
+  TargetLoweringObjectFileELF() = default;
+  ~TargetLoweringObjectFileELF() override = default;
+
+  /// Emit Obj-C garbage collection and linker options.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
+                            const MCSymbol *Sym) const override;
+
+  /// Given a constant with the SectionKind, return a section that it should be
+  /// placed in.
+  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+                                   const Constant *C,
+                                   unsigned &Align) const override;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  MCSection *getSectionForJumpTable(const Function &F,
+                                    const TargetMachine &TM) const override;
+
+  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+                                           const Function &F) const override;
+
+  /// Return an MCExpr to use for a reference to the specified type info global
+  /// variable from exception handling information.
+  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+                                        unsigned Encoding,
+                                        const TargetMachine &TM,
+                                        MachineModuleInfo *MMI,
+                                        MCStreamer &Streamer) const override;
+
+  /// The symbol that gets passed to .cfi_personality.
+  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+                                    const TargetMachine &TM,
+                                    MachineModuleInfo *MMI) const override;
+
+  void InitializeELF(bool UseInitArray_);
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+                                       const GlobalValue *RHS,
+                                       const TargetMachine &TM) const override;
+};
+
+class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
+public:
+  TargetLoweringObjectFileMachO();
+  ~TargetLoweringObjectFileMachO() override = default;
+
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+  /// Emit the module flags that specify the garbage collection information.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+                                   const Constant *C,
+                                   unsigned &Align) const override;
+
+  /// The mach-o version of this method defaults to returning a stub reference.
+  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+                                        unsigned Encoding,
+                                        const TargetMachine &TM,
+                                        MachineModuleInfo *MMI,
+                                        MCStreamer &Streamer) const override;
+
+  /// The symbol that gets passed to .cfi_personality.
+  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+                                    const TargetMachine &TM,
+                                    MachineModuleInfo *MMI) const override;
+
+  /// Get MachO PC relative GOT entry relocation
+  const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+                                          const MCValue &MV, int64_t Offset,
+                                          MachineModuleInfo *MMI,
+                                          MCStreamer &Streamer) const override;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+                         const TargetMachine &TM) const override;
+};
+
+class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
+  mutable unsigned NextUniqueID = 0;
+
+public:
+  ~TargetLoweringObjectFileCOFF() override = default;
+
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+                         const TargetMachine &TM) const override;
+
+  MCSection *getSectionForJumpTable(const Function &F,
+                                    const TargetMachine &TM) const override;
+
+  /// Emit Obj-C garbage collection and linker options.
+  void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+                          const TargetMachine &TM) const override;
+
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  void emitLinkerFlagsForGlobal(raw_ostream &OS,
+                                const GlobalValue *GV) const override;
+
+  void emitLinkerFlagsForUsed(raw_ostream &OS,
+                              const GlobalValue *GV) const override;
+};
+
+class TargetLoweringObjectFileWasm : public TargetLoweringObjectFile {
+  mutable unsigned NextUniqueID = 0;
+
+public:
+  TargetLoweringObjectFileWasm() = default;
+  ~TargetLoweringObjectFileWasm() override = default;
+
+  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
+                                      const TargetMachine &TM) const override;
+
+  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
+                                    const TargetMachine &TM) const override;
+
+  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+                                           const Function &F) const override;
+
+  void InitializeWasm();
+  MCSection *getStaticCtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+  MCSection *getStaticDtorSection(unsigned Priority,
+                                  const MCSymbol *KeySym) const override;
+
+  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
+                                       const GlobalValue *RHS,
+                                       const TargetMachine &TM) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h b/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h
new file mode 100644
index 0000000..d0d959c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetOpcodes.h
@@ -0,0 +1,42 @@
+//===-- llvm/CodeGen/TargetOpcodes.h - Target Indep Opcodes -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target independent instruction opcodes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETOPCODES_H
+#define LLVM_CODEGEN_TARGETOPCODES_H
+
+namespace llvm {
+
+/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+namespace TargetOpcode {
+enum {
+#define HANDLE_TARGET_OPCODE(OPC) OPC,
+#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
+#include "llvm/Support/TargetOpcodes.def"
+};
+} // end namespace TargetOpcode
+
+/// Check whether the given Opcode is a generic opcode that is not supposed
+/// to appear after ISel.
+inline bool isPreISelGenericOpcode(unsigned Opcode) {
+  return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START &&
+         Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+}
+
+/// Check whether the given Opcode is a target-specific opcode.
+inline bool isTargetSpecificOpcode(unsigned Opcode) {
+  return Opcode > TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+}
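+
+// Illustrative note, not part of the upstream header: each
+// HANDLE_TARGET_OPCODE(NAME) line in TargetOpcodes.def expands to an
+// enumerator, so TargetOpcode::PHI, TargetOpcode::COPY, etc. are plain
+// unsigned constants and can be tested directly:
+//
+//   if (MI.getOpcode() == TargetOpcode::COPY ||
+//       isPreISelGenericOpcode(MI.getOpcode()))
+//     ; // handle target-independent or pre-isel generic instructions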
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
new file mode 100644
index 0000000..5918c52
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetPassConfig.h
@@ -0,0 +1,433 @@
+//===- TargetPassConfig.h - Code Generation pass options --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Target-Independent Code Generator Pass Configuration Options pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETPASSCONFIG_H
+#define LLVM_CODEGEN_TARGETPASSCONFIG_H
+
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+
+class LLVMTargetMachine;
+struct MachineSchedContext;
+class PassConfigImpl;
+class ScheduleDAGInstrs;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+
+class PassManagerBase;
+
+} // end namespace legacy
+
+using legacy::PassManagerBase;
+
+/// Discriminated union of Pass ID types.
+///
+/// The PassConfig API prefers dealing with IDs because they are safer and more
+/// efficient. IDs decouple configuration from instantiation. This way, when a
+/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe to
+/// refer to a Pass pointer after adding it to a pass manager, which deletes
+/// redundant pass instances.
+///
+/// However, it is convenient to directly instantiate target passes with
+/// non-default ctors. These often don't have a registered PassInfo. Rather than
+/// force all target passes to implement the pass registry boilerplate, allow
+/// the PassConfig API to handle either type.
+///
+/// AnalysisID is sadly char*, so PointerIntPair won't work.
+class IdentifyingPassPtr {
+  union {
+    AnalysisID ID;
+    Pass *P;
+  };
+  bool IsInstance = false;
+
+public:
+  IdentifyingPassPtr() : P(nullptr) {}
+  IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr) {}
+  IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
+
+  bool isValid() const { return P; }
+  bool isInstance() const { return IsInstance; }
+
+  AnalysisID getID() const {
+    assert(!IsInstance && "Not a Pass ID");
+    return ID;
+  }
+
+  Pass *getInstance() const {
+    assert(IsInstance && "Not a Pass Instance");
+    return P;
+  }
+};
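+
+// Illustrative sketch (createMyTargetPass is hypothetical): the union holds
+// either payload, and isInstance() tells the consumer which accessor is safe:
+//
+//   IdentifyingPassPtr ByID(&MachineSchedulerID);   // registered pass, by ID
+//   IdentifyingPassPtr ByObj(createMyTargetPass()); // concrete instance
+//   if (ByObj.isInstance())
+//     PM.add(ByObj.getInstance());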
+
+template <> struct isPodLike<IdentifyingPassPtr> {
+  static const bool value = true;
+};
+
+/// Target-Independent Code Generator Pass Configuration Options.
+///
+/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
+/// to the internals of other CodeGen passes.
+class TargetPassConfig : public ImmutablePass {
+private:
+  PassManagerBase *PM = nullptr;
+  AnalysisID StartBefore = nullptr;
+  AnalysisID StartAfter = nullptr;
+  AnalysisID StopBefore = nullptr;
+  AnalysisID StopAfter = nullptr;
+  bool Started = true;
+  bool Stopped = false;
+  bool AddingMachinePasses = false;
+
+  /// Set the StartAfter, StartBefore, StopAfter and StopBefore passes to
+  /// allow running only a portion of the normal code-gen pass sequence.
+  ///
+  /// If the StartAfter and StartBefore pass IDs are zero, then compilation will
+  /// begin at the normal point; otherwise, clear the Started flag to indicate
+  /// that passes should not be added until the starting pass is seen.  If the
+  /// Stop pass ID is zero, then compilation will continue to the end.
+  ///
+  /// This function expects that at least one of the StartAfter or the
+  /// StartBefore pass IDs is null.
+  void setStartStopPasses();
+
+protected:
+  LLVMTargetMachine *TM;
+  PassConfigImpl *Impl = nullptr; // Internal data structures
+  bool Initialized = false; // Flagged after all passes are configured.
+
+  // Target Pass Options
+  // Targets provide a default setting, user flags override.
+  bool DisableVerify = false;
+
+  /// Default setting for -enable-tail-merge on this target.
+  bool EnableTailMerge = true;
+
+  /// Require processing of functions such that callees are generated before
+  /// callers.
+  bool RequireCodeGenSCCOrder = false;
+
+  /// Add the actual instruction selection passes. This does not include
+  /// preparation passes on IR.
+  bool addCoreISelPasses();
+
+public:
+  TargetPassConfig(LLVMTargetMachine &TM, PassManagerBase &pm);
+  // Dummy constructor.
+  TargetPassConfig();
+
+  ~TargetPassConfig() override;
+
+  static char ID;
+
+  /// Get the right type of TargetMachine for this target.
+  template<typename TMC> TMC &getTM() const {
+    return *static_cast<TMC*>(TM);
+  }
+
+  void setInitialized() { Initialized = true; }
+
+  CodeGenOpt::Level getOptLevel() const;
+
+  /// Describe the status of the codegen
+  /// pipeline set by this target pass config.
+  /// Having a limited codegen pipeline means that options
+  /// have been used to restrict what codegen is doing.
+  /// In particular, that means that codegen won't emit
+  /// assembly code.
+  bool hasLimitedCodeGenPipeline() const;
+
+  /// If hasLimitedCodeGenPipeline is true, this method
+  /// returns a string with the names of the options, separated
+  /// by \p Separator, that caused this pipeline to be limited.
+  std::string
+  getLimitedCodeGenPipelineReason(const char *Separator = "/") const;
+
+  /// Check if the codegen pipeline is limited in such a way that it
+  /// won't be complete. When the codegen pipeline is not complete,
+  /// this means it may not be possible to generate assembly from it.
+  bool willCompleteCodeGenPipeline() const {
+    return !hasLimitedCodeGenPipeline() || (!StopAfter && !StopBefore);
+  }
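+
+  // For example, with llc's existing -stop-after=<pass> flag StopAfter is
+  // set, so hasLimitedCodeGenPipeline() is true, willCompleteCodeGenPipeline()
+  // is false, and the compiler emits MIR rather than assembly.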
+
+  void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
+
+  bool getEnableTailMerge() const { return EnableTailMerge; }
+  void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
+
+  bool requiresCodeGenSCCOrder() const { return RequireCodeGenSCCOrder; }
+  void setRequiresCodeGenSCCOrder(bool Enable = true) {
+    setOpt(RequireCodeGenSCCOrder, Enable);
+  }
+
+  /// Allow the target to override a specific pass without overriding the pass
+  /// pipeline. When passes are added to the standard pipeline at the
+  /// point where StandardID is expected, add TargetID in its place.
+  void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
+
+  /// Insert InsertedPassID pass after TargetPassID pass.
+  void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
+                  bool VerifyAfter = true, bool PrintAfter = true);
+
+  /// Allow the target to enable a specific standard pass by default.
+  void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
+
+  /// Allow the target to disable a specific standard pass by default.
+  void disablePass(AnalysisID PassID) {
+    substitutePass(PassID, IdentifyingPassPtr());
+  }
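+
+  // Illustrative sketch (MyTargetPeepholeID is hypothetical): a target's
+  // TargetPassConfig subclass typically tunes the pipeline in its
+  // constructor:
+  //
+  //   disablePass(&PostRASchedulerID);   // opt out of a standard pass
+  //   insertPass(&RegisterCoalescerID,   // run an extra pass after another
+  //              &MyTargetPeepholeID);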
+
+  /// Return the pass substituted for StandardID by the target.
+  /// If no substitution exists, return StandardID.
+  IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
+
+  /// Return true if the pass has been substituted by the target or
+  /// overridden on the command line.
+  bool isPassSubstitutedOrOverridden(AnalysisID ID) const;
+
+  /// Return true if the optimized regalloc pipeline is enabled.
+  bool getOptimizeRegAlloc() const;
+
+  /// Return true if the default global register allocator is in use and
+  /// has not been overridden on the command line with '-regalloc=...'.
+  bool usingDefaultRegAlloc() const;
+
+  /// High level function that adds all passes necessary to go from LLVM IR
+  /// representation to the MI representation.
+  /// Adds IR based lowering and target specific optimization passes and finally
+  /// the core instruction selection passes.
+  /// \returns true if an error occurred, false otherwise.
+  bool addISelPasses();
+
+  /// Add common target configurable passes that perform LLVM IR to IR
+  /// transforms following machine independent optimization.
+  virtual void addIRPasses();
+
+  /// Add passes to lower exception handling for the code generator.
+  void addPassesToHandleExceptions();
+
+  /// Add pass to prepare the LLVM IR for code generation. This should be done
+  /// before exception handling preparation passes.
+  virtual void addCodeGenPrepare();
+
+  /// Add common passes that perform LLVM IR to IR transforms in preparation for
+  /// instruction selection.
+  virtual void addISelPrepare();
+
+  /// addInstSelector - This method should install an instruction selector pass,
+  /// which converts from LLVM code to machine instructions.
+  virtual bool addInstSelector() {
+    return true;
+  }
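+
+  // Illustrative override (createMyTargetISelDag is hypothetical): a
+  // SelectionDAG-based target installs its DAG-to-DAG pass and returns false
+  // to signal success:
+  //
+  //   bool MyPassConfig::addInstSelector() {
+  //     addPass(createMyTargetISelDag(getTM<MyTargetMachine>(),
+  //                                   getOptLevel()));
+  //     return false; // no error
+  //   }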
+
+  /// This method should install an IR translator pass, which converts from
+  /// LLVM code to machine instructions with possibly generic opcodes.
+  virtual bool addIRTranslator() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before legalization.
+  virtual void addPreLegalizeMachineIR() {}
+
+  /// This method should install a legalize pass, which converts the instruction
+  /// sequence into one that can be selected by the target.
+  virtual bool addLegalizeMachineIR() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before the register bank selection.
+  virtual void addPreRegBankSelect() {}
+
+  /// This method should install a register bank selector pass, which
+  /// assigns register banks to virtual registers without a register
+  /// class or register banks.
+  virtual bool addRegBankSelect() { return true; }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before the (global) instruction selection.
+  virtual void addPreGlobalInstructionSelect() {}
+
+  /// This method should install a (global) instruction selector pass, which
+  /// converts possibly generic instructions to fully target-specific
+  /// instructions, thereby constraining all generic virtual registers to
+  /// register classes.
+  virtual bool addGlobalInstructionSelect() { return true; }
+
+  /// Add the complete, standard set of LLVM CodeGen passes.
+  /// Fully developed targets will not generally override this.
+  virtual void addMachinePasses();
+
+  /// Create an instance of ScheduleDAGInstrs to be run within the standard
+  /// MachineScheduler pass for this function and target at the current
+  /// optimization level.
+  ///
+  /// This can also be used to plug a new MachineSchedStrategy into an instance
+  /// of the standard ScheduleDAGMI:
+  ///   return new ScheduleDAGMI(C, make_unique<MyStrategy>(C), /*RemoveKillFlags=*/false)
+  ///
+  /// Return NULL to select the default (generic) machine scheduler.
+  virtual ScheduleDAGInstrs *
+  createMachineScheduler(MachineSchedContext *C) const {
+    return nullptr;
+  }
+
+  /// Similar to createMachineScheduler but used when postRA machine scheduling
+  /// is enabled.
+  virtual ScheduleDAGInstrs *
+  createPostMachineScheduler(MachineSchedContext *C) const {
+    return nullptr;
+  }
+
+  /// printAndVerify - Add a pass to dump then verify the machine function, if
+  /// those steps are enabled.
+  void printAndVerify(const std::string &Banner);
+
+  /// Add a pass to print the machine function if printing is enabled.
+  void addPrintPass(const std::string &Banner);
+
+  /// Add a pass to perform basic verification of the machine function if
+  /// verification is enabled.
+  void addVerifyPass(const std::string &Banner);
+
+  /// Check whether or not GlobalISel should abort on error.
+  /// When this is disabled, GlobalISel will fall back on SDISel instead of
+  /// erroring out.
+  bool isGlobalISelAbortEnabled() const;
+
+  /// Check whether or not a diagnostic should be emitted when GlobalISel
+  /// uses the fallback path. In other words, it will emit a diagnostic
+  /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
+  virtual bool reportDiagnosticWhenGlobalISelFallback() const;
+
+protected:
+  // Helper to verify the analysis is really immutable.
+  void setOpt(bool &Opt, bool Val);
+
+  /// Methods with trivial inline returns are convenient points in the common
+  /// codegen pass pipeline where targets may insert passes. Methods with
+  /// out-of-line standard implementations are major CodeGen stages called by
+  /// addMachinePasses. Some targets may override major stages when inserting
+  /// passes is insufficient, but maintaining overridden stages is more work.
+  ///
+
+  /// addPreISel - This method should add any "last minute" LLVM->LLVM
+  /// passes (which are run just before instruction selector).
+  virtual bool addPreISel() {
+    return true;
+  }
+
+  /// addMachineSSAOptimization - Add standard passes that optimize machine
+  /// instructions in SSA form.
+  virtual void addMachineSSAOptimization();
+
+  /// Add passes that optimize instruction level parallelism for out-of-order
+  /// targets. These passes are run while the machine code is still in SSA
+  /// form, so they can use MachineTraceMetrics to control their heuristics.
+  ///
+  /// All passes added here should preserve the MachineDominatorTree,
+  /// MachineLoopInfo, and MachineTraceMetrics analyses.
+  virtual bool addILPOpts() {
+    return false;
+  }
+
+  /// This method may be implemented by targets that want to run passes
+  /// immediately before register allocation.
+  virtual void addPreRegAlloc() { }
+
+  /// createTargetRegisterAllocator - Create the register allocator pass for
+  /// this target at the current optimization level.
+  virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
+
+  /// addFastRegAlloc - Add the minimum set of target-independent passes that
+  /// are required for fast register allocation.
+  virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
+
+  /// addOptimizedRegAlloc - Add passes related to register allocation.
+  /// LLVMTargetMachine provides standard regalloc passes for most targets.
+  virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+
+  /// addPreRewrite - Add passes to the optimized register allocation pipeline
+  /// after register allocation is complete, but before virtual registers are
+  /// rewritten to physical registers.
+  ///
+  /// These passes must preserve VirtRegMap and LiveIntervals, and when running
+  /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
+  /// When these passes run, VirtRegMap contains legal physreg assignments for
+  /// all virtual registers.
+  virtual bool addPreRewrite() {
+    return false;
+  }
+
+  /// This method may be implemented by targets that want to run passes after
+  /// register allocation pass pipeline but before prolog-epilog insertion.
+  virtual void addPostRegAlloc() { }
+
+  /// Add passes that optimize machine instructions after register allocation.
+  virtual void addMachineLateOptimization();
+
+  /// This method may be implemented by targets that want to run passes after
+  /// prolog-epilog insertion and before the second instruction scheduling pass.
+  virtual void addPreSched2() { }
+
+  /// addGCPasses - Add late codegen passes that analyze code for garbage
+  /// collection. This should return true if GC info should be printed after
+  /// these passes.
+  virtual bool addGCPasses();
+
+  /// Add standard basic block placement passes.
+  virtual void addBlockPlacement();
+
+  /// This pass may be implemented by targets that want to run passes
+  /// immediately before machine code is emitted.
+  virtual void addPreEmitPass() { }
+
+  /// Targets may add passes immediately before machine code is emitted in this
+  /// callback. This is called even later than `addPreEmitPass`.
+  // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
+  // position and remove the `2` suffix here as this callback is what
+  // `addPreEmitPass` *should* be but in reality isn't.
+  virtual void addPreEmitPass2() {}
+
+  /// Utilities for targets to add passes to the pass manager.
+  ///
+
+  /// Add a CodeGen pass at this point in the pipeline after checking overrides.
+  /// Return the pass that was added, or zero if no pass was added.
+  /// @p printAfter    if true and adding a machine function pass, add an extra
+  ///                  machine printer pass afterwards.
+  /// @p verifyAfter   if true and adding a machine function pass, add an extra
+  ///                  machine verification pass afterwards.
+  AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true,
+                     bool printAfter = true);
+
+  /// Add a pass to the PassManager if that pass is supposed to be run, as
+  /// determined by the StartAfter and StopAfter options. Takes ownership of the
+  /// pass.
+  /// @p printAfter    if true and adding a machine function pass, add an extra
+  ///                  machine printer pass afterwards.
+  /// @p verifyAfter   if true and adding a machine function pass, add an extra
+  ///                  machine verification pass afterwards.
+  void addPass(Pass *P, bool verifyAfter = true, bool printAfter = true);
+
+  /// addMachinePasses helper to create the target-selected or overridden
+  /// regalloc pass.
+  FunctionPass *createRegAllocPass(bool Optimized);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETPASSCONFIG_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
new file mode 100644
index 0000000..ea47a24
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -0,0 +1,1188 @@
+//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes an abstract interface used to get information about a
+// target machine's register file.  This information is used for a variety of
+// purposes, especially register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
+#define LLVM_CODEGEN_TARGETREGISTERINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class BitVector;
+class LiveRegMatrix;
+class MachineFunction;
+class MachineInstr;
+class RegScavenger;
+class VirtRegMap;
+class LiveIntervals;
+
+class TargetRegisterClass {
+public:
+  using iterator = const MCPhysReg *;
+  using const_iterator = const MCPhysReg *;
+  using sc_iterator = const TargetRegisterClass* const *;
+
+  // Instance variables filled by tablegen, do not use!
+  const MCRegisterClass *MC;
+  const uint32_t *SubClassMask;
+  const uint16_t *SuperRegIndices;
+  const LaneBitmask LaneMask;
+  /// Classes with a higher priority value are assigned first by register
+  /// allocators using a greedy heuristic. The value is in the range [0,63].
+  const uint8_t AllocationPriority;
+  /// Whether the class supports two (or more) disjunct subregister indices.
+  const bool HasDisjunctSubRegs;
+  /// Whether a combination of subregisters can cover every register in the
+  /// class. See also the CoveredBySubRegs description in Target.td.
+  const bool CoveredBySubRegs;
+  const sc_iterator SuperClasses;
+  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
+
+  /// Return the register class ID number.
+  unsigned getID() const { return MC->getID(); }
+
+  /// begin/end - Return all of the registers in this class.
+  ///
+  iterator       begin() const { return MC->begin(); }
+  iterator         end() const { return MC->end(); }
+
+  /// Return the number of registers in this class.
+  unsigned getNumRegs() const { return MC->getNumRegs(); }
+
+  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
+  getRegisters() const {
+    return make_range(MC->begin(), MC->end());
+  }
+
+  /// Return the specified register in the class.
+  unsigned getRegister(unsigned i) const {
+    return MC->getRegister(i);
+  }
+
+  /// Return true if the specified register is included in this register class.
+  /// This does not include virtual registers.
+  bool contains(unsigned Reg) const {
+    return MC->contains(Reg);
+  }
+
+  /// Return true if both registers are in this class.
+  bool contains(unsigned Reg1, unsigned Reg2) const {
+    return MC->contains(Reg1, Reg2);
+  }
+
+  /// Return the cost of copying a value between two registers in this class.
+  /// A negative number means the register class is very expensive
+  /// to copy, e.g. status flag register classes.
+  int getCopyCost() const { return MC->getCopyCost(); }
+
+  /// Return true if this register class may be used to create virtual
+  /// registers.
+  bool isAllocatable() const { return MC->isAllocatable(); }
+
+  /// Return true if the specified TargetRegisterClass
+  /// is a proper sub-class of this TargetRegisterClass.
+  bool hasSubClass(const TargetRegisterClass *RC) const {
+    return RC != this && hasSubClassEq(RC);
+  }
+
+  /// Returns true if RC is a sub-class of or equal to this class.
+  bool hasSubClassEq(const TargetRegisterClass *RC) const {
+    unsigned ID = RC->getID();
+    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
+  }
+
+  /// Return true if the specified TargetRegisterClass is a
+  /// proper super-class of this TargetRegisterClass.
+  bool hasSuperClass(const TargetRegisterClass *RC) const {
+    return RC->hasSubClass(this);
+  }
+
+  /// Returns true if RC is a super-class of or equal to this class.
+  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
+    return RC->hasSubClassEq(this);
+  }
+
+  /// Returns a bit vector of subclasses, including this one.
+  /// The vector is indexed by class IDs.
+  ///
+  /// To use it, consider the returned array as a chunk of memory that
+  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
+  /// contains a bitset of the ID of the subclasses in big-endian style.
+  ///
+  /// I.e., the representation of the memory from left to right at the
+  /// bit level looks like:
+  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
+  ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
+  /// where each number represents a class ID and XXX marks bits that
+  /// should be ignored.
+  ///
+  /// See the implementation of hasSubClassEq for an example of how it
+  /// can be used.
+  const uint32_t *getSubClassMask() const {
+    return SubClassMask;
+  }
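+
+  // For example, testing class ID 37 reads bit 5 of the second 32-bit word,
+  // exactly as hasSubClassEq() does:
+  //
+  //   bool IsSub = (getSubClassMask()[37 / 32] >> (37 % 32)) & 1;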
+
+  /// Returns a 0-terminated list of sub-register indices that project some
+  /// super-register class into this register class. The list has an entry for
+  /// each Idx such that:
+  ///
+  ///   There exists SuperRC where:
+  ///     For all Reg in SuperRC:
+  ///       this->contains(Reg:Idx)
+  const uint16_t *getSuperRegIndices() const {
+    return SuperRegIndices;
+  }
+
+  /// Returns a NULL-terminated list of super-classes.  The
+  /// classes are ordered by ID which is also a topological ordering from large
+  /// to small classes.  The list does NOT include the current class.
+  sc_iterator getSuperClasses() const {
+    return SuperClasses;
+  }
+
+  /// Return true if this TargetRegisterClass is a subset
+  /// class of at least one other TargetRegisterClass.
+  bool isASubClass() const {
+    return SuperClasses[0] != nullptr;
+  }
+
+  /// Returns the preferred order for allocating registers from this register
+  /// class in MF. The raw order comes directly from the .td file and may
+  /// include reserved registers that are not allocatable.
+  /// Register allocators should also make sure to allocate
+  /// callee-saved registers only after all the volatiles are used. The
+  /// RegisterClassInfo class provides filtered allocation orders with
+  /// callee-saved registers moved to the end.
+  ///
+  /// The MachineFunction argument can be used to tune the allocatable
+  /// registers based on the characteristics of the function, subtarget, or
+  /// other criteria.
+  ///
+  /// By default, this method returns all registers in the class.
+  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
+    return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
+  }
+
+  /// Returns the combination of all lane masks of registers in this class.
+  /// The lane masks of the registers are the combination of all lane masks
+  /// of their subregisters. Returns 1 if there are no subregisters.
+  LaneBitmask getLaneMask() const {
+    return LaneMask;
+  }
+};
+
+/// Extra information, not in MCRegisterDesc, about registers.
+/// These are used by codegen, not by MC.
+struct TargetRegisterInfoDesc {
+  unsigned CostPerUse;          // Extra cost of instructions using register.
+  bool inAllocatableClass;      // Register belongs to an allocatable regclass.
+};
+
+/// Each TargetRegisterClass has a per register weight, and weight
+/// limit which must be less than the limits of its pressure sets.
+struct RegClassWeight {
+  unsigned RegWeight;
+  unsigned WeightLimit;
+};
+
+/// TargetRegisterInfo base class - We assume that the target defines a static
+/// array of TargetRegisterDesc objects that represent all of the machine
+/// registers that the target has.  As such, we simply have to track a pointer
+/// to this array so that we can turn register number into a register
+/// descriptor.
+///
+class TargetRegisterInfo : public MCRegisterInfo {
+public:
+  using regclass_iterator = const TargetRegisterClass * const *;
+  using vt_iterator = const MVT::SimpleValueType *;
+  struct RegClassInfo {
+    unsigned RegSize, SpillSize, SpillAlignment;
+    vt_iterator VTList;
+  };
+private:
+  const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
+  const char *const *SubRegIndexNames;        // Names of subreg indexes.
+  // Pointer to array of lane masks, one per sub-reg index.
+  const LaneBitmask *SubRegIndexLaneMasks;
+
+  regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
+  LaneBitmask CoveringLanes;
+  const RegClassInfo *const RCInfos;
+  unsigned HwMode;
+
+protected:
+  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
+                     regclass_iterator RegClassBegin,
+                     regclass_iterator RegClassEnd,
+                     const char *const *SRINames,
+                     const LaneBitmask *SRILaneMasks,
+                     LaneBitmask CoveringLanes,
+                     const RegClassInfo *const RSI,
+                     unsigned Mode = 0);
+  virtual ~TargetRegisterInfo();
+
+public:
+  // Register numbers can represent physical registers, virtual registers, and
+  // sometimes stack slots. The unsigned values are divided into these ranges:
+  //
+  //   0           Not a register, can be used as a sentinel.
+  //   [1;2^30)    Physical registers assigned by TableGen.
+  //   [2^30;2^31) Stack slots. (Rarely used.)
+  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
+  //
+  // Further sentinels can be allocated from the small negative integers.
+  // DenseMapInfo<unsigned> uses -1u and -2u.
+
+  /// isStackSlot - Sometimes it is useful to be able to store a non-negative
+  /// frame index in a variable that normally holds a register. isStackSlot()
+  /// returns true if Reg is in the range used for stack slots.
+  ///
+  /// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
+  /// slots, so if a variable may contain a stack slot, always check
+  /// isStackSlot() first.
+  ///
+  static bool isStackSlot(unsigned Reg) {
+    return int(Reg) >= (1 << 30);
+  }
+
+  /// Compute the frame index from a register value representing a stack slot.
+  static int stackSlot2Index(unsigned Reg) {
+    assert(isStackSlot(Reg) && "Not a stack slot");
+    return int(Reg - (1u << 30));
+  }
+
+  /// Convert a non-negative frame index to a stack slot register value.
+  static unsigned index2StackSlot(int FI) {
+    assert(FI >= 0 && "Cannot hold a negative frame index.");
+    return FI + (1u << 30);
+  }
+
+  /// Return true if the specified register number is in
+  /// the physical register namespace.
+  static bool isPhysicalRegister(unsigned Reg) {
+    assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+    return int(Reg) > 0;
+  }
+
+  /// Return true if the specified register number is in
+  /// the virtual register namespace.
+  static bool isVirtualRegister(unsigned Reg) {
+    assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+    return int(Reg) < 0;
+  }
+
+  /// Convert a virtual register number to a 0-based index.
+  /// The first virtual register in a function will get the index 0.
+  static unsigned virtReg2Index(unsigned Reg) {
+    assert(isVirtualRegister(Reg) && "Not a virtual register");
+    return Reg & ~(1u << 31);
+  }
+
+  /// Convert a 0-based index to a virtual register number.
+  /// This is the inverse operation of VirtReg2IndexFunctor below.
+  static unsigned index2VirtReg(unsigned Index) {
+    return Index | (1u << 31);
+  }
+
+  /// Return the size in bits of a register from class RC.
+  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).RegSize;
+  }
+
+  /// Return the size in bytes of the stack slot allocated to hold a spilled
+  /// copy of a register from class RC.
+  unsigned getSpillSize(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).SpillSize / 8;
+  }
+
+  /// Return the minimum required alignment in bytes for a spill slot for
+  /// a register of this class.
+  unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).SpillAlignment / 8;
+  }
+
+  /// Return true if the given TargetRegisterClass has the ValueType T.
+  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
+    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
+      if (MVT(*I) == T)
+        return true;
+    return false;
+  }
+
+  /// Loop over all of the value types that can be represented by values
+  /// in the given register class.
+  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
+    return getRegClassInfo(RC).VTList;
+  }
+
+  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
+    vt_iterator I = legalclasstypes_begin(RC);
+    while (*I != MVT::Other)
+      ++I;
+    return I;
+  }
+
+  /// Returns the Register Class of a physical register of the given type,
+  /// picking the most specific sub-register class of the right type that contains this
+  /// physreg.
+  const TargetRegisterClass *
+    getMinimalPhysRegClass(unsigned Reg, MVT VT = MVT::Other) const;
+
+  /// Return the maximal subclass of the given register class that is
+  /// allocatable, or NULL if none exists.
+  const TargetRegisterClass *
+    getAllocatableClass(const TargetRegisterClass *RC) const;
+
+  /// Returns a bitset indexed by register number indicating if a register is
+  /// allocatable or not. If a register class is specified, returns the subset
+  /// for the class.
+  BitVector getAllocatableSet(const MachineFunction &MF,
+                              const TargetRegisterClass *RC = nullptr) const;
+
+  /// Return the additional cost of using this register instead
+  /// of other registers in its class.
+  unsigned getCostPerUse(unsigned RegNo) const {
+    return InfoDesc[RegNo].CostPerUse;
+  }
+
+  /// Return true if the register belongs to an allocatable register class.
+  bool isInAllocatableClass(unsigned RegNo) const {
+    return InfoDesc[RegNo].inAllocatableClass;
+  }
+
+  /// Return the human-readable symbolic target-specific
+  /// name for the specified SubRegIndex.
+  const char *getSubRegIndexName(unsigned SubIdx) const {
+    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+           "This is not a subregister index");
+    return SubRegIndexNames[SubIdx-1];
+  }
+
+  /// Return a bitmask representing the parts of a register that are covered by
+  /// SubIdx \see LaneBitmask.
+  ///
+  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
+  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
+    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+    return SubRegIndexLaneMasks[SubIdx];
+  }
+
+  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
+  /// used to determine if sub-registers overlap - they can't be used to
+  /// determine if a set of sub-registers completely cover another
+  /// sub-register.
+  ///
+  /// The X86 general purpose registers have two lanes corresponding to the
+  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
+  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
+  /// sub_32bit sub-register.
+  ///
+  /// On the other hand, the ARM NEON lanes fully cover their registers: The
+  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
+  /// This is related to the CoveredBySubRegs property on register definitions.
+  ///
+  /// This function returns a bit mask of lanes that completely cover their
+  /// sub-registers. More precisely, given:
+  ///
+  ///   Covering = getCoveringLanes();
+  ///   MaskA = getSubRegIndexLaneMask(SubA);
+  ///   MaskB = getSubRegIndexLaneMask(SubB);
+  ///
+  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
+  /// SubB.
+  LaneBitmask getCoveringLanes() const { return CoveringLanes; }
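+
+  // An illustrative transcription of that test (a sketch using this class's
+  // own accessors, not an upstream member):
+  //
+  //   bool coversSubReg(unsigned SubA, unsigned SubB) const {
+  //     LaneBitmask Covering = getCoveringLanes();
+  //     LaneBitmask MaskA = getSubRegIndexLaneMask(SubA);
+  //     LaneBitmask MaskB = getSubRegIndexLaneMask(SubB);
+  //     return (MaskA & ~(MaskB & Covering)).none(); // SubA covered by SubB
+  //   }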
+
+  /// Returns true if the two registers are equal or alias each other.
+  /// The registers may be virtual registers.
+  bool regsOverlap(unsigned regA, unsigned regB) const {
+    if (regA == regB) return true;
+    if (isVirtualRegister(regA) || isVirtualRegister(regB))
+      return false;
+
+    // Regunits are numerically ordered. Find a common unit.
+    MCRegUnitIterator RUA(regA, this);
+    MCRegUnitIterator RUB(regB, this);
+    do {
+      if (*RUA == *RUB) return true;
+      if (*RUA < *RUB) ++RUA;
+      else             ++RUB;
+    } while (RUA.isValid() && RUB.isValid());
+    return false;
+  }
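+
+  // For example (illustrative; X86 register enum values assumed):
+  //   TRI->regsOverlap(X86::EAX, X86::AX);  // true: AX aliases EAX.
+  //   TRI->regsOverlap(X86::EAX, X86::EBX); // false: no common unit.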
+
+  /// Returns true if Reg contains RegUnit.
+  bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
+    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
+      if (*Units == RegUnit)
+        return true;
+    return false;
+  }
+
+  /// Returns the original SrcReg unless it is the target of a copy-like
+  /// operation, in which case we chain backwards through all such operations
+  /// to the ultimate source register.  If a physical register is encountered,
+  /// we stop the search.
+  virtual unsigned lookThruCopyLike(unsigned SrcReg,
+                                    const MachineRegisterInfo *MRI) const;
+
+  /// Return a null-terminated list of all of the callee-saved registers on
+  /// this target. The registers should be in the order of desired callee-save
+  /// stack frame offset. The first register is closest to the incoming stack
+  /// pointer if stack grows down, and vice versa.
+  /// Notice: This function does not take into account disabled CSRs.
+  ///         In most cases you will want to use the getCalleeSavedRegs
+  ///         function implemented in MachineRegisterInfo instead.
+  virtual const MCPhysReg*
+  getCalleeSavedRegs(const MachineFunction *MF) const = 0;
+
+  /// Return a mask of call-preserved registers for the given calling convention
+  /// on the current function. The mask should include all call-preserved
+  /// aliases. This is used by the register allocator to determine which
+  /// registers can be live across a call.
+  ///
+  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
+  /// A set bit indicates that all bits of the corresponding register are
+  /// preserved across the function call.  The bit mask is expected to be
+  /// sub-register complete, i.e. if A is preserved, so are all its
+  /// sub-registers.
+  ///
+  /// Bits are numbered from the LSB, so the bit for physical register Reg can
+  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
+  ///
+  /// A NULL pointer means that no register mask will be used, and call
+  /// instructions should use implicit-def operands to indicate call clobbered
+  /// registers.
+  ///
+  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+                                               CallingConv::ID) const {
+    // The default mask clobbers everything.  All targets should override.
+    return nullptr;
+  }
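+
+  // For example, a single register's bit can be tested as described above
+  // (a sketch; MF, CC and Reg are assumed to be in scope):
+  //
+  //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CC);
+  //   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);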
+
+  /// Return a register mask that clobbers everything.
+  virtual const uint32_t *getNoPreservedMask() const {
+    llvm_unreachable("target does not provide no preserved mask");
+  }
+
+  /// Return true if all bits that are set in mask \p mask0 are also set in
+  /// \p mask1.
+  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;
+
+  /// Return all the call-preserved register masks defined for this target.
+  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
+  virtual ArrayRef<const char *> getRegMaskNames() const = 0;
+
+  /// Returns a bitset indexed by physical register number indicating if a
+  /// register is a special register that has particular uses and should be
+  /// considered unavailable at all times, e.g. stack pointer, return address.
+  /// A reserved register:
+  /// - is not allocatable
+  /// - is considered always live
+  /// - is ignored by liveness tracking
+  /// It is often necessary to reserve the super registers of a reserved
+  /// register as well, to avoid them getting allocated indirectly. You may use
+  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
+  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
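+
+  // A typical override might look like this (hypothetical sketch; MyTarget
+  // and its SP register are assumed names):
+  //
+  //   BitVector MyRegisterInfo::getReservedRegs(const MachineFunction &MF)
+  //       const {
+  //     BitVector Reserved(getNumRegs());
+  //     markSuperRegs(Reserved, MyTarget::SP); // SP and all its aliases.
+  //     assert(checkAllSuperRegsMarked(Reserved));
+  //     return Reserved;
+  //   }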
+
+  /// Returns true if PhysReg is unallocatable and constant throughout the
+  /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
+  virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
+
+  /// Physical registers that may be modified within a function but are
+  /// guaranteed to be restored before any uses. This is useful for targets that
+  /// have call sequences where a GOT register may be updated by the caller
+  /// prior to a call and is guaranteed to be restored (also by the caller)
+  /// after the call. 
+  virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
+                                        const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Prior to adding the live-out mask to a stackmap or patchpoint
+  /// instruction, provide the target the opportunity to adjust it (mainly to
+  /// remove pseudo-registers that should be ignored).
+  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}
+
+  /// Return a super-register of the specified register
+  /// Reg so its sub-register of index SubIdx is Reg.
+  unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+                               const TargetRegisterClass *RC) const {
+    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
+  }
+
+  /// Return a subclass of the specified register
+  /// class A so that each register in it has a sub-register of the
+  /// specified sub-register index which is in the specified register class B.
+  ///
+  /// TableGen will synthesize missing A sub-classes.
+  virtual const TargetRegisterClass *
+  getMatchingSuperRegClass(const TargetRegisterClass *A,
+                           const TargetRegisterClass *B, unsigned Idx) const;
+
+  // For a copy-like instruction that defines a register of class DefRC with
+  // subreg index DefSubReg, reading from another source with class SrcRC and
+  // subregister SrcSubReg, return true if this is a preferable copy
+  // instruction, or false if an earlier use should be used instead.
+  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
+                                    unsigned DefSubReg,
+                                    const TargetRegisterClass *SrcRC,
+                                    unsigned SrcSubReg) const;
+
+  /// Returns the largest legal sub-class of RC that
+  /// supports the sub-register index Idx.
+  /// If no such sub-class exists, return NULL.
+  /// If all registers in RC already have an Idx sub-register, return RC.
+  ///
+  /// TableGen generates a version of this function that is good enough in most
+  /// cases.  Targets can override if they have constraints that TableGen
+  /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
+  /// supported by the full GR32 register class in 64-bit mode, but only by the
+  /// GR32_ABCD register class in 32-bit mode.
+  ///
+  /// TableGen will synthesize missing RC sub-classes.
+  virtual const TargetRegisterClass *
+  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
+    assert(Idx == 0 && "Target has no sub-registers");
+    return RC;
+  }
+
+  /// Return the subregister index you get from composing
+  /// two subregister indices.
+  ///
+  /// The special null sub-register index composes as the identity.
+  ///
+  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+  /// returns c. Note that composeSubRegIndices does not tell you about illegal
+  /// compositions: if R does not have a subreg a, or R:a does not have a
+  /// subreg b, the result of the composition is unspecified.
+  ///
+  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+  /// ssub_0:S0 - ssub_3:S3 subregs.
+  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+    if (!a) return b;
+    if (!b) return a;
+    return composeSubRegIndicesImpl(a, b);
+  }
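+
+  // The ARM example above, in code (ARM sub-register index enums assumed):
+  //   TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0) == ARM::ssub_2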
+
+  /// Transforms a LaneMask computed for one subregister to the lanemask that
+  /// would have been computed when composing the sub-subregisters with IdxA
+  /// first. @sa composeSubRegIndices()
+  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
+                                         LaneBitmask Mask) const {
+    if (!IdxA)
+      return Mask;
+    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
+  }
+
+  /// Transform a lanemask given for a virtual register to the corresponding
+  /// lanemask before using subregister with index \p IdxA.
+  /// This is the reverse of composeSubRegIndexLaneMask(); assuming Mask is a
+  /// valid lane mask (no invalid bits set), the following holds:
+  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
+  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
+  /// => X1 == Mask
+  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
+                                                LaneBitmask LaneMask) const {
+    if (!IdxA)
+      return LaneMask;
+    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
+  }
+
+  /// Debugging helper: dump register in human readable form to dbgs() stream.
+  static void dumpReg(unsigned Reg, unsigned SubRegIndex = 0,
+                      const TargetRegisterInfo* TRI = nullptr);
+
+protected:
+  /// Overridden by TableGen in targets that have sub-registers.
+  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+  /// Overridden by TableGen in targets that have sub-registers.
+  virtual LaneBitmask
+  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
+                                                            LaneBitmask) const {
+    llvm_unreachable("Target has no sub-registers");
+  }
+
+public:
+  /// Find a common super-register class if it exists.
+  ///
+  /// Find a register class, SuperRC and two sub-register indices, PreA and
+  /// PreB, such that:
+  ///
+  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
+  ///
+  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
+  ///
+  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
+  ///
+  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
+  /// requirements, and there is no register class with a smaller spill size
+  /// that satisfies the requirements.
+  ///
+  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
+  ///
+  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
+  /// that case, the returned register class will be a sub-class of the
+  /// corresponding argument register class.
+  ///
+  /// The function returns NULL if no register class can be found.
+  const TargetRegisterClass*
+  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
+                         const TargetRegisterClass *RCB, unsigned SubB,
+                         unsigned &PreA, unsigned &PreB) const;
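+
+  // A minimal calling sketch (RCA/SubA and RCB/SubB as in the contract
+  // above):
+  //
+  //   unsigned PreA, PreB;
+  //   if (const TargetRegisterClass *SuperRC =
+  //           TRI->getCommonSuperRegClass(RCA, SubA, RCB, SubB, PreA, PreB)) {
+  //     // For all Reg in SuperRC: Reg:PreA is in RCA and Reg:PreB in RCB.
+  //   }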
+
+  //===--------------------------------------------------------------------===//
+  // Register Class Information
+  //
+protected:
+  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
+    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
+  }
+
+public:
+  /// Register class iterators
+  regclass_iterator regclass_begin() const { return RegClassBegin; }
+  regclass_iterator regclass_end() const { return RegClassEnd; }
+  iterator_range<regclass_iterator> regclasses() const {
+    return make_range(regclass_begin(), regclass_end());
+  }
+
+  unsigned getNumRegClasses() const {
+    return (unsigned)(regclass_end()-regclass_begin());
+  }
+
+  /// Returns the register class associated with the enumeration value.
+  /// See class MCOperandInfo.
+  const TargetRegisterClass *getRegClass(unsigned i) const {
+    assert(i < getNumRegClasses() && "Register Class ID out of range");
+    return RegClassBegin[i];
+  }
+
+  /// Returns the name of the register class.
+  const char *getRegClassName(const TargetRegisterClass *Class) const {
+    return MCRegisterInfo::getRegClassName(Class->MC);
+  }
+
+  /// Find the largest common subclass of A and B.
+  /// Return NULL if there is no common subclass.
+  /// The common subclass should contain
+  /// simple value type SVT if it is not the Any type.
+  const TargetRegisterClass *
+  getCommonSubClass(const TargetRegisterClass *A,
+                    const TargetRegisterClass *B,
+                    const MVT::SimpleValueType SVT =
+                    MVT::SimpleValueType::Any) const;
+
+  /// Returns a TargetRegisterClass used for pointer values.
+  /// If a target supports multiple different pointer register classes,
+  /// kind specifies which one is indicated.
+  virtual const TargetRegisterClass *
+  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
+    llvm_unreachable("Target didn't implement getPointerRegClass!");
+  }
+
+  /// Returns a legal register class to copy a register in the specified class
+  /// to or from. If it is possible to copy the register directly without using
+  /// a cross register class copy, returns the specified RC. Returns NULL if it
+  /// is not possible to copy between two registers of the specified class.
+  virtual const TargetRegisterClass *
+  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
+    return RC;
+  }
+
+  /// Returns the largest super class of RC that is legal to use in the current
+  /// sub-target and has the same spill size.
+  /// The returned register class can be used to create virtual registers which
+  /// means that all its registers can be copied and spilled.
+  virtual const TargetRegisterClass *
+  getLargestLegalSuperClass(const TargetRegisterClass *RC,
+                            const MachineFunction &) const {
+    // The default implementation is very conservative and doesn't allow the
+    // register allocator to inflate register classes.
+    return RC;
+  }
+
+  /// Return the register pressure "high water mark" for the specific register
+  /// class. The scheduler is in high register pressure mode (for the specific
+  /// register class) if it goes over the limit.
+  ///
+  /// Note: this is the old register pressure model that relies on a manually
+  /// specified representative register class per value type.
+  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+                                       MachineFunction &MF) const {
+    return 0;
+  }
+
+  /// Return a heuristic for the machine scheduler to compare the profitability
+  /// of increasing one register pressure set versus another.  The scheduler
+  /// will prefer increasing the register pressure of the set which returns
+  /// the largest value for this function.
+  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
+                                          unsigned PSetID) const {
+    return PSetID;
+  }
+
+  /// Get the weight in units of pressure for this register class.
+  virtual const RegClassWeight &getRegClassWeight(
+    const TargetRegisterClass *RC) const = 0;
+
+  /// Returns size in bits of a phys/virtual/generic register.
+  unsigned getRegSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI) const;
+
+  /// Get the weight in units of pressure for this register unit.
+  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
+
+  /// Get the number of dimensions of register pressure.
+  virtual unsigned getNumRegPressureSets() const = 0;
+
+  /// Get the name of this register unit pressure set.
+  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
+
+  /// Get the register unit pressure limit for this dimension.
+  /// This limit must be adjusted dynamically for reserved registers.
+  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
+                                          unsigned Idx) const = 0;
+
+  /// Get the dimensions of register pressure impacted by this register class.
+  /// Returns a -1 terminated array of pressure set IDs.
+  virtual const int *getRegClassPressureSets(
+    const TargetRegisterClass *RC) const = 0;
+
+  /// Get the dimensions of register pressure impacted by this register unit.
+  /// Returns a -1 terminated array of pressure set IDs.
+  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
+
+  /// Get a list of 'hint' registers that the register allocator should try
+  /// first when allocating a physical register for the virtual register
+  /// VirtReg. These registers are effectively moved to the front of the
+  /// allocation order. If true is returned, regalloc will try to only use
+  /// hints to the greatest extent possible even if it means spilling.
+  ///
+  /// The Order argument is the allocation order for VirtReg's register class
+  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
+  /// come from Order, and they must not be reserved.
+  ///
+  /// The default implementation of this function will only add target
+  /// independent register allocation hints. Targets that override this
+  /// function should typically call this default implementation as well and
+  /// expect to see generic copy hints added.
+  virtual bool getRegAllocationHints(unsigned VirtReg,
+                                     ArrayRef<MCPhysReg> Order,
+                                     SmallVectorImpl<MCPhysReg> &Hints,
+                                     const MachineFunction &MF,
+                                     const VirtRegMap *VRM = nullptr,
+                                     const LiveRegMatrix *Matrix = nullptr)
+    const;
+
+  /// A callback to allow target a chance to update register allocation hints
+  /// when a register is "changed" (e.g. coalesced) to another register.
+  /// e.g. On ARM, some virtual registers should target register pairs,
+  /// if one of pair is coalesced to another register, the allocation hint of
+  /// the other half of the pair should be changed to point to the new register.
+  virtual void updateRegAllocHint(unsigned Reg, unsigned NewReg,
+                                  MachineFunction &MF) const {
+    // Do nothing.
+  }
+
+  /// The creation of multiple copy hints has been implemented in
+  /// weightCalcHelper(), but since this affects so many tests for many
+  /// targets, it is temporarily disabled by default. THIS SHOULD BE
+  /// "GENERAL GOODNESS" and hopefully all targets will update their tests
+  /// and enable this soon. This hook should then be removed.
+  virtual bool enableMultipleCopyHints() const { return false; }
+
+  /// Allow the target to reverse allocation order of local live ranges. This
+  /// will generally allocate shorter local live ranges first. For targets with
+  /// many registers, this could reduce regalloc compile time by a large
+  /// factor. It is disabled by default for three reasons:
+  /// (1) Top-down allocation is simpler and easier to debug for targets that
+  /// don't benefit from reversing the order.
+  /// (2) Bottom-up allocation could result in poor eviction decisions on some
+  /// targets affecting the performance of compiled code.
+  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
+  virtual bool reverseLocalAssignment() const { return false; }
+
+  /// Allow the target to override the cost of using a callee-saved register for
+  /// the first time. Default value of 0 means we will use a callee-saved
+  /// register if it is available.
+  virtual unsigned getCSRFirstUseCost() const { return 0; }
+
+  /// Returns true if the target requires (and can make use of) the register
+  /// scavenger.
+  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target wants to use frame pointer based accesses to
+  /// spill to the scavenger emergency spill slot.
+  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
+    return true;
+  }
+
+  /// Returns true if the target requires post PEI scavenging of registers for
+  /// materializing frame index constants.
+  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target requires using the RegScavenger directly for
+  /// frame elimination despite using requiresFrameIndexScavenging.
+  virtual bool requiresFrameIndexReplacementScavenging(
+      const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Returns true if the target wants the LocalStackAllocation pass to be run
+  /// and virtual base registers used for more efficient stack access.
+  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// Return true if the target has reserved a spill slot in the stack frame of
+  /// the given function for the specified register. e.g. On x86, if the frame
+  /// register is required, the first fixed stack object is reserved as its
+  /// spill slot. This tells PEI not to create a new stack frame
+  /// object for the given register. It should be called only after
+  /// determineCalleeSaves().
+  virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
+                                    int &FrameIdx) const {
+    return false;
+  }
+
+  /// Returns true if the live-ins should be tracked after register allocation.
+  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+    return false;
+  }
+
+  /// True if the stack can be realigned for the target.
+  virtual bool canRealignStack(const MachineFunction &MF) const;
+
+  /// True if storage within the function requires the stack pointer to be
+  /// aligned more than the normal calling convention calls for.
+  /// This cannot be overridden by the target, but canRealignStack can be
+  /// overridden.
+  bool needsStackRealignment(const MachineFunction &MF) const;
+
+  /// Get the offset from the referenced frame index in the instruction,
+  /// if there is one.
+  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+                                           int Idx) const {
+    return 0;
+  }
+
+  /// Returns true if the instruction's frame index reference would be better
+  /// served by a base register other than FP or SP.
+  /// Used by LocalStackFrameAllocation to determine which frame index
+  /// references it should create new base registers for.
+  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+    return false;
+  }
+
+  /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
+  /// before insertion point I.
+  virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
+                                            unsigned BaseReg, int FrameIdx,
+                                            int64_t Offset) const {
+    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
+                     "target");
+  }
+
+  /// Resolve a frame index operand of an instruction
+  /// to reference the indicated base register plus offset instead.
+  virtual void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+                                 int64_t Offset) const {
+    llvm_unreachable("resolveFrameIndex does not exist on this target");
+  }
+
+  /// Determine whether a given base register plus offset immediate is
+  /// encodable to resolve a frame index.
+  virtual bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
+                                  int64_t Offset) const {
+    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
+  }
+
+  /// Spill the register so it can be used by the register scavenger.
+  /// Return true if the register was spilled, false otherwise.
+  /// If this function does not spill the register, the scavenger
+  /// will instead spill it to the emergency spill slot.
+  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator I,
+                                     MachineBasicBlock::iterator &UseMI,
+                                     const TargetRegisterClass *RC,
+                                     unsigned Reg) const {
+    return false;
+  }
+
+  /// This method must be overridden to eliminate abstract frame indices from
+  /// instructions which may use them. The instruction referenced by the
+  /// iterator contains an MO_FrameIndex operand which must be eliminated by
+  /// this method. This method may modify or replace the specified instruction,
+  /// as long as it keeps the iterator pointing at the finished product.
+  /// SPAdj is the SP adjustment due to the call frame setup instruction.
+  /// FIOperandNum is the FI operand number.
+  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+                                   int SPAdj, unsigned FIOperandNum,
+                                   RegScavenger *RS = nullptr) const = 0;
+
+  /// Return the assembly name for \p Reg.
+  virtual StringRef getRegAsmName(unsigned Reg) const {
+    // FIXME: We are assuming that the assembly name is equal to the TableGen
+    // name converted to lower case
+    //
+    // The TableGen name is the name of the definition for this register in the
+    // target's tablegen files.  For example, the TableGen name of
+    // def EAX : Register <...>; is "EAX"
+    return StringRef(getName(Reg));
+  }
+
+  //===--------------------------------------------------------------------===//
+  /// Subtarget Hooks
+
+  /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true.
+  virtual bool shouldCoalesce(MachineInstr *MI,
+                              const TargetRegisterClass *SrcRC,
+                              unsigned SubReg,
+                              const TargetRegisterClass *DstRC,
+                              unsigned DstSubReg,
+                              const TargetRegisterClass *NewRC,
+                              LiveIntervals &LIS) const
+  { return true; }
+
+  //===--------------------------------------------------------------------===//
+  /// Debug information queries.
+
+  /// getFrameRegister - This method should return the register used as a base
+  /// for values allocated in the current stack frame.
+  virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
+
+  /// Mark a register and all its aliases as reserved in the given set.
+  void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
+
+  /// Returns true if for every register in the set all super registers are part
+  /// of the set as well.
+  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
+      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;
+};
+
+//===----------------------------------------------------------------------===//
+//                           SuperRegClassIterator
+//===----------------------------------------------------------------------===//
+//
+// Iterate over the possible super-registers for a given register class. The
+// iterator will visit a list of pairs (Idx, Mask) corresponding to the
+// possible classes of super-registers.
+//
+// Each bit mask will have at least one set bit, and each set bit in Mask
+// corresponds to a SuperRC such that:
+//
+//   For all Reg in SuperRC: Reg:Idx is in RC.
+//
+// The iterator can include (0, RC->getSubClassMask()) as the first entry which
+// also satisfies the above requirement, assuming Reg:0 == Reg.
+//
+class SuperRegClassIterator {
+  const unsigned RCMaskWords;
+  unsigned SubReg = 0;
+  const uint16_t *Idx;
+  const uint32_t *Mask;
+
+public:
+  /// Create a SuperRegClassIterator that visits all the super-register classes
+  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
+  SuperRegClassIterator(const TargetRegisterClass *RC,
+                        const TargetRegisterInfo *TRI,
+                        bool IncludeSelf = false)
+    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
+      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
+    if (!IncludeSelf)
+      ++*this;
+  }
+
+  /// Returns true if this iterator is still pointing at a valid entry.
+  bool isValid() const { return Idx; }
+
+  /// Returns the current sub-register index.
+  unsigned getSubReg() const { return SubReg; }
+
+  /// Returns the bit mask of register classes that getSubReg() projects into
+  /// RC.
+  /// See TargetRegisterClass::getSubClassMask() for how to use it.
+  const uint32_t *getMask() const { return Mask; }
+
+  /// Advance iterator to the next entry.
+  void operator++() {
+    assert(isValid() && "Cannot move iterator past end.");
+    Mask += RCMaskWords;
+    SubReg = *Idx++;
+    if (!SubReg)
+      Idx = nullptr;
+  }
+};
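+
+// Typical iteration pattern (a sketch; RC and TRI are assumed valid):
+//
+//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
+//     unsigned SubIdx = SRI.getSubReg();
+//     const uint32_t *Mask = SRI.getMask();
+//     // Each set bit in Mask names a class SuperRC with Reg:SubIdx in RC.
+//   }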
+
+//===----------------------------------------------------------------------===//
+//                           BitMaskClassIterator
+//===----------------------------------------------------------------------===//
+/// This class encapsulates the logic to iterate over a bitmask returned by
+/// the various RegClass related APIs.
+/// E.g., this class can be used to iterate over the subclasses provided by
+/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
+class BitMaskClassIterator {
+  /// Total number of register classes.
+  const unsigned NumRegClasses;
+  /// Base index of CurrentChunk.
+  /// In other words, the number of bits we read to get at the
+  /// beginning of that chunk.
+  unsigned Base = 0;
+  /// Adjusted base index into CurrentChunk:
+  /// Base index + how many bits we have read within CurrentChunk.
+  unsigned Idx = 0;
+  /// Current register class ID.
+  unsigned ID = 0;
+  /// Mask we are iterating over.
+  const uint32_t *Mask;
+  /// Current chunk of the Mask we are traversing.
+  uint32_t CurrentChunk;
+
+  /// Move ID to the next set bit.
+  void moveToNextID() {
+    // If the current chunk of memory is empty, move to the next one,
+    // while making sure we do not go past the number of register
+    // classes.
+    while (!CurrentChunk) {
+      // Move to the next chunk.
+      Base += 32;
+      if (Base >= NumRegClasses) {
+        ID = NumRegClasses;
+        return;
+      }
+      CurrentChunk = *++Mask;
+      Idx = Base;
+    }
+    // Otherwise look for the first bit set from the right
+    // (representation of the class ID is big endian).
+    // See getSubClassMask for more details on the representation.
+    unsigned Offset = countTrailingZeros(CurrentChunk);
+    // Add the Offset to the adjusted base number of this chunk: Idx.
+    // This is the ID of the register class.
+    ID = Idx + Offset;
+
+    // Consume the zeros, if any, and the bit we just read
+    // so that we are at the right spot for the next call.
+    // Do not do Offset + 1 because Offset may be 31, and shifting
+    // by 32 is UB. In that case we could instead have set the chunk
+    // to 0, but that would have introduced an extra if statement.
+    moveNBits(Offset);
+    moveNBits(1);
+  }
+
+  /// Move \p NumBits bits forward in CurrentChunk.
+  void moveNBits(unsigned NumBits) {
+    assert(NumBits < 32 && "Undefined behavior spotted!");
+    // Consume the bit we read for the next call.
+    CurrentChunk >>= NumBits;
+    // Adjust the base for the chunk.
+    Idx += NumBits;
+  }
+
+public:
+  /// Create a BitMaskClassIterator that visits all the register classes
+  /// represented by \p Mask.
+  ///
+  /// \pre \p Mask != nullptr
+  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
+      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
+    // Move to the first ID.
+    moveToNextID();
+  }
+
+  /// Returns true if this iterator is still pointing at a valid entry.
+  bool isValid() const { return getID() != NumRegClasses; }
+
+  /// Returns the current register class ID.
+  unsigned getID() const { return ID; }
+
+  /// Advance iterator to the next entry.
+  void operator++() {
+    assert(isValid() && "Cannot move iterator past end.");
+    moveToNextID();
+  }
+};
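+
+// Typical use, decoding a sub-class mask into register class IDs (a sketch;
+// RC and TRI are assumed valid):
+//
+//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
+//        ++It) {
+//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
+//     // ... use SubRC ...
+//   }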
+
+// This is useful when building IndexedMaps keyed on virtual registers.
+struct VirtReg2IndexFunctor {
+  using argument_type = unsigned;
+  unsigned operator()(unsigned Reg) const {
+    return TargetRegisterInfo::virtReg2Index(Reg);
+  }
+};
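+
+// For example (a sketch; requires llvm/ADT/IndexedMap.h, MRI is a
+// MachineRegisterInfo):
+//   IndexedMap<MachineInstr *, VirtReg2IndexFunctor> Def;
+//   Def.resize(MRI.getNumVirtRegs());
+//   Def[VReg] = &MI; // VReg is translated via virtReg2Index().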
+
+/// Prints virtual and physical registers with or without a TRI instance.
+///
+/// The format is:
+///   %noreg          - NoRegister
+///   %5              - a virtual register.
+///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
+///   %eax            - a physical register
+///   %physreg17      - a physical register when no TRI instance given.
+///
+/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
+Printable printReg(unsigned Reg, const TargetRegisterInfo *TRI = nullptr,
+                   unsigned SubRegIdx = 0,
+                   const MachineRegisterInfo *MRI = nullptr);
+
+/// Create Printable object to print register units on a \ref raw_ostream.
+///
+/// Register units are named after their root registers:
+///
+///   al      - Single root.
+///   fp0~st7 - Dual roots.
+///
+/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
+Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
+
+/// \brief Create Printable object to print virtual registers and physical
+/// registers on a \ref raw_ostream.
+Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
+
+/// \brief Create Printable object to print register classes or register banks
+/// on a \ref raw_ostream.
+Printable printRegClassOrBank(unsigned Reg, const MachineRegisterInfo &RegInfo,
+                              const TargetRegisterInfo *TRI);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
new file mode 100644
index 0000000..1044f0b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSchedule.h
@@ -0,0 +1,204 @@
+//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper around MCSchedModel that allows the interface to
+// benefit from information currently only available in TargetInstrInfo.
+// Ideally, the scheduling interface would be fully defined in the MC layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
+#define LLVM_CODEGEN_TARGETSCHEDULE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+
+namespace llvm {
+
+class MachineInstr;
+class TargetInstrInfo;
+
+/// Provide an instruction scheduling machine model to CodeGen passes.
+class TargetSchedModel {
+  // For efficiency, hold a copy of the statically defined MCSchedModel for this
+  // processor.
+  MCSchedModel SchedModel;
+  InstrItineraryData InstrItins;
+  const TargetSubtargetInfo *STI = nullptr;
+  const TargetInstrInfo *TII = nullptr;
+
+  SmallVector<unsigned, 16> ResourceFactors;
+  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
+  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.
+
+  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
+
+public:
+  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}
+
+  /// \brief Initialize the machine model for instruction scheduling.
+  ///
+  /// The machine model API keeps a copy of the top-level MCSchedModel table
+  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
+  /// dynamic properties.
+  void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
+            const TargetInstrInfo *tii);
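+
+  // Typical setup from a CodeGen pass (a sketch; ST is the current
+  // TargetSubtargetInfo):
+  //   TargetSchedModel SchedModel;
+  //   SchedModel.init(ST.getSchedModel(), &ST, ST.getInstrInfo());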
+
+  /// Return the MCSchedClassDesc for this instruction.
+  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+
+  /// \brief TargetSubtargetInfo getter.
+  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }
+
+  /// \brief TargetInstrInfo getter.
+  const TargetInstrInfo *getInstrInfo() const { return TII; }
+
+  /// \brief Return true if this machine model includes an instruction-level
+  /// scheduling model.
+  ///
+  /// This is more detailed than the coarse-grained IssueWidth and default
+  /// latency properties, but separate from the per-cycle itinerary data.
+  bool hasInstrSchedModel() const;
+
+  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
+
+  /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+  /// data.
+  ///
+  /// This models scheduling at each stage in the processor pipeline.
+  bool hasInstrItineraries() const;
+
+  const InstrItineraryData *getInstrItineraries() const {
+    if (hasInstrItineraries())
+      return &InstrItins;
+    return nullptr;
+  }
+
+  /// \brief Return true if this machine model includes an instruction-level
+  /// scheduling model or cycle-to-cycle itinerary data.
+  bool hasInstrSchedModelOrItineraries() const {
+    return hasInstrSchedModel() || hasInstrItineraries();
+  }
+
+  /// \brief Identify the processor corresponding to the current subtarget.
+  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
+
+  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
+  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
+
+  /// \brief Return true if new group must begin.
+  bool mustBeginGroup(const MachineInstr *MI,
+                          const MCSchedClassDesc *SC = nullptr) const;
+  /// \brief Return true if current group must end.
+  bool mustEndGroup(const MachineInstr *MI,
+                          const MCSchedClassDesc *SC = nullptr) const;
+
+  /// \brief Return the number of issue slots required for this MI.
+  unsigned getNumMicroOps(const MachineInstr *MI,
+                          const MCSchedClassDesc *SC = nullptr) const;
+
+  /// \brief Get the number of kinds of resources for this target.
+  unsigned getNumProcResourceKinds() const {
+    return SchedModel.getNumProcResourceKinds();
+  }
+
+  /// \brief Get a processor resource by ID for convenience.
+  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx);
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  const char *getResourceName(unsigned PIdx) const {
+    if (!PIdx)
+      return "MOps";
+    return SchedModel.getProcResource(PIdx)->Name;
+  }
+#endif
+
+  using ProcResIter = const MCWriteProcResEntry *;
+
+  /// \brief Get an iterator into the processor resources consumed by this
+  /// scheduling class.
+  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
+    // The subtarget holds a single resource table for all processors.
+    return STI->getWriteProcResBegin(SC);
+  }
+  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
+    return STI->getWriteProcResEnd(SC);
+  }
+
+  /// \brief Multiply the number of units consumed for a resource by this factor
+  /// to normalize it relative to other resources.
+  unsigned getResourceFactor(unsigned ResIdx) const {
+    return ResourceFactors[ResIdx];
+  }
+
+  /// \brief Multiply number of micro-ops by this factor to normalize it
+  /// relative to other resources.
+  unsigned getMicroOpFactor() const {
+    return MicroOpFactor;
+  }
+
+  /// \brief Multiply cycle count by this factor to normalize it relative to
+  /// other resources. This is the number of resource units per cycle.
+  unsigned getLatencyFactor() const {
+    return ResourceLCM;
+  }
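+
+  // With these factors, micro-ops, resource counts, and cycles can be
+  // compared in common units (a sketch; the count variables are assumed):
+  //   unsigned NormalizedOps = OpCount * SchedModel.getMicroOpFactor();
+  //   unsigned NormalizedRes = ResCount * SchedModel.getResourceFactor(PIdx);
+  //   unsigned NormalizedCyc = CycleCount * SchedModel.getLatencyFactor();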
+
+  /// \brief Number of micro-ops that may be buffered for OOO execution.
+  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
+
+  /// \brief Number of resource units that may be buffered for OOO execution.
+  /// \return The buffer size in resource units or -1 for unlimited.
+  int getResourceBufferSize(unsigned PIdx) const {
+    return SchedModel.getProcResource(PIdx)->BufferSize;
+  }
+
+  /// \brief Compute operand latency based on the available machine model.
+  ///
+  /// Compute and return the latency of the given data dependent def and use
+  /// when the operand indices are already known. UseMI may be NULL for an
+  /// unknown user.
+  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+                                 const MachineInstr *UseMI, unsigned UseOperIdx)
+    const;
+
+  /// \brief Compute the instruction latency based on the available machine
+  /// model.
+  ///
+  /// Compute and return the expected latency of this instruction independent of
+  /// a particular use. computeOperandLatency is the preferred API, but this is
+  /// occasionally useful to help estimate instruction cost.
+  ///
+  /// If UseDefaultDefLatency is false and no new machine sched model is
+  /// present, this method falls back to TII->getInstrLatency with an empty
+  /// instruction itinerary (this is so we preserve the previous behavior of the
+  /// if converter after moving it to TargetSchedModel).
+  unsigned computeInstrLatency(const MachineInstr *MI,
+                               bool UseDefaultDefLatency = true) const;
+  unsigned computeInstrLatency(unsigned Opcode) const;
+
+  /// \brief Output dependency latency of a pair of defs of the same register.
+  ///
+  /// This is typically one cycle.
+  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+                                const MachineInstr *DepMI) const;
+
+  /// \brief Compute the reciprocal throughput of the given instruction.
+  Optional<double> computeInstrRThroughput(const MachineInstr *MI) const;
+  Optional<double> computeInstrRThroughput(unsigned Opcode) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETSCHEDULE_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
new file mode 100644
index 0000000..5e5faac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -0,0 +1,261 @@
+//===- llvm/CodeGen/TargetSubtargetInfo.h - Target Information --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the subtarget options of a Target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+#define LLVM_CODEGEN_TARGETSUBTARGETINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/PBQPRAConstraint.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class CallLowering;
+class InstrItineraryData;
+struct InstrStage;
+class InstructionSelector;
+class LegalizerInfo;
+class MachineInstr;
+struct MachineSchedPolicy;
+struct MCReadAdvanceEntry;
+struct MCWriteLatencyEntry;
+struct MCWriteProcResEntry;
+class RegisterBankInfo;
+class SDep;
+class SelectionDAGTargetInfo;
+struct SubtargetFeatureKV;
+struct SubtargetInfoKV;
+class SUnit;
+class TargetFrameLowering;
+class TargetInstrInfo;
+class TargetLowering;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSchedModel;
+class Triple;
+
+//===----------------------------------------------------------------------===//
+///
+/// TargetSubtargetInfo - Generic base class for all target subtargets.  All
+/// Target-specific options that control code generation and printing should
+/// be exposed through a TargetSubtargetInfo-derived class.
+///
+class TargetSubtargetInfo : public MCSubtargetInfo {
+protected: // Can only create subclasses...
+  TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
+                      ArrayRef<SubtargetFeatureKV> PF,
+                      ArrayRef<SubtargetFeatureKV> PD,
+                      const SubtargetInfoKV *ProcSched,
+                      const MCWriteProcResEntry *WPR,
+                      const MCWriteLatencyEntry *WL,
+                      const MCReadAdvanceEntry *RA, const InstrStage *IS,
+                      const unsigned *OC, const unsigned *FP);
+
+public:
+  // AntiDepBreakMode - Type of anti-dependence breaking that should
+  // be performed before post-RA scheduling.
+  using AntiDepBreakMode = enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL };
+  using RegClassVector = SmallVectorImpl<const TargetRegisterClass *>;
+
+  TargetSubtargetInfo() = delete;
+  TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
+  TargetSubtargetInfo &operator=(const TargetSubtargetInfo &) = delete;
+  ~TargetSubtargetInfo() override;
+
+  virtual bool isXRaySupported() const { return false; }
+
+  // Interfaces to the major aspects of target machine information:
+  //
+  // -- Instruction opcode and operand information
+  // -- Pipelines and scheduling information
+  // -- Stack frame information
+  // -- Selection DAG lowering information
+  // -- Call lowering information
+  //
+  // N.B. These objects may change during compilation. It's not safe to cache
+  // them between functions.
+  virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
+  virtual const TargetFrameLowering *getFrameLowering() const {
+    return nullptr;
+  }
+  virtual const TargetLowering *getTargetLowering() const { return nullptr; }
+  virtual const SelectionDAGTargetInfo *getSelectionDAGInfo() const {
+    return nullptr;
+  }
+  virtual const CallLowering *getCallLowering() const { return nullptr; }
+
+  // FIXME: This lets targets specialize the selector by subtarget (which lets
+  // us do things like a dedicated avx512 selector).  However, we might want
+  // to also specialize selectors by MachineFunction, which would let us be
+  // aware of optsize/optnone and such.
+  virtual const InstructionSelector *getInstructionSelector() const {
+    return nullptr;
+  }
+
+  virtual unsigned getHwMode() const { return 0; }
+
+  /// Target can subclass this hook to select a different DAG scheduler.
+  virtual RegisterScheduler::FunctionPassCtor
+      getDAGScheduler(CodeGenOpt::Level) const {
+    return nullptr;
+  }
+
+  virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }
+
+  /// getRegisterInfo - If register information is available, return it.  If
+  /// not, return null.
+  virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
+
+  /// If the information for the register banks is available, return it.
+  /// Otherwise return nullptr.
+  virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr; }
+
+  /// getInstrItineraryData - Returns instruction itinerary data for the target
+  /// or specific subtarget.
+  virtual const InstrItineraryData *getInstrItineraryData() const {
+    return nullptr;
+  }
+
+  /// Resolve a SchedClass at runtime, where SchedClass identifies an
+  /// MCSchedClassDesc with the isVariant property. This may return the ID of
+  /// another variant SchedClass, but repeated invocation must quickly terminate
+  /// in a nonvariant SchedClass.
+  virtual unsigned resolveSchedClass(unsigned SchedClass,
+                                     const MachineInstr *MI,
+                                     const TargetSchedModel *SchedModel) const {
+    return 0;
+  }
+
+  /// \brief True if the subtarget should run MachineScheduler after aggressive
+  /// coalescing.
+  ///
+  /// This currently replaces the SelectionDAG scheduler with the "source" order
+  /// scheduler (though see below for an option to turn this off and use the
+  /// TargetLowering preference). It does not yet disable the postRA scheduler.
+  virtual bool enableMachineScheduler() const;
+
+  /// \brief Support printing of [latency:throughput] comment in output .S file.
+  virtual bool supportPrintSchedInfo() const { return false; }
+
+  /// \brief True if the machine scheduler should disable the TLI preference
+  /// for preRA scheduling with the source level scheduler.
+  virtual bool enableMachineSchedDefaultSched() const { return true; }
+
+  /// \brief True if the subtarget should enable joining global copies.
+  ///
+  /// By default this is enabled if the machine scheduler is enabled, but
+  /// can be overridden.
+  virtual bool enableJoinGlobalCopies() const;
+
+  /// True if the subtarget should run a scheduler after register allocation.
+  ///
+  /// By default this queries the PostRAScheduling bit in the scheduling model
+  /// which is the preferred way to influence this.
+  virtual bool enablePostRAScheduler() const;
+
+  /// \brief True if the subtarget should run the atomic expansion pass.
+  virtual bool enableAtomicExpand() const;
+
+  /// True if the subtarget should run the indirectbr expansion pass.
+  virtual bool enableIndirectBrExpand() const;
+
+  /// \brief Override generic scheduling policy within a region.
+  ///
+  /// This is a convenient way for targets that don't provide any custom
+  /// scheduling heuristics (no custom MachineSchedStrategy) to make
+  /// changes to the generic scheduling policy.
+  virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
+                                   unsigned NumRegionInstrs) const {}
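+
+  // A hypothetical subtarget might tune the policy like this (sketch; the
+  // MachineSchedPolicy fields are defined in MachineScheduler.h):
+  //
+  //   void MySubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+  //                                         unsigned NumRegionInstrs) const {
+  //     Policy.OnlyTopDown = true;                       // One direction only.
+  //     Policy.ShouldTrackPressure = NumRegionInstrs > 32;
+  //   }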
+
+  /// \brief Perform target specific adjustments to the latency of a schedule
+  /// dependency.
+  virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep &dep) const {}
+
+  // For use with PostRAScheduling: get the anti-dependence breaking that should
+  // be performed before post-RA scheduling.
+  virtual AntiDepBreakMode getAntiDepBreakMode() const { return ANTIDEP_NONE; }
+
+  // For use with PostRAScheduling: in CriticalPathRCs, return any register
+  // classes that should only be considered for anti-dependence breaking if they
+  // are on the critical path.
+  virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
+    return CriticalPathRCs.clear();
+  }
+
+  /// \brief Provide an ordered list of schedule DAG mutations for the post-RA
+  /// scheduler.
+  virtual void getPostRAMutations(
+      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+  }
+
+  /// \brief Provide an ordered list of schedule DAG mutations for the machine
+  /// pipeliner.
+  virtual void getSMSMutations(
+      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+  }
+
+  // For use with PostRAScheduling: get the minimum optimization level needed
+  // to enable post-RA scheduling.
+  virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
+    return CodeGenOpt::Default;
+  }
+
+  /// \brief True if the subtarget should run the local reassignment
+  /// heuristic of the register allocator.
+  /// This heuristic may be compile time intensive; \p OptLevel provides
+  /// a finer grain for tuning the register allocator.
+  virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;
+
+  /// \brief True if the subtarget should consider the cost of local intervals
+  /// created by a split candidate when choosing the best split candidate. This
+  /// heuristic may be compile time intensive.
+  virtual bool enableAdvancedRASplitCost() const;
+
+  /// \brief Enable use of alias analysis during code generation (during MI
+  /// scheduling, DAGCombine, etc.).
+  virtual bool useAA() const;
+
+  /// \brief Enable the use of the early if conversion pass.
+  virtual bool enableEarlyIfConversion() const { return false; }
+
+  /// \brief Return PBQPConstraint(s) for the target.
+  ///
+  /// Override to provide custom PBQP constraints.
+  virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
+    return nullptr;
+  }
+
+  /// Enable tracking of subregister liveness in register allocator.
+  /// Please use MachineRegisterInfo::subRegLivenessEnabled() instead where
+  /// possible.
+  virtual bool enableSubRegLiveness() const { return false; }
+
+  /// Returns a string representation of the scheduler comment.
+  std::string getSchedInfoStr(const MachineInstr &MI) const override;
+  std::string getSchedInfoStr(MCInst const &MCI) const override;
+
+  /// This is called after a .mir file was loaded.
+  virtual void mirFileLoaded(MachineFunction &MF) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_TARGETSUBTARGETINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h b/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h
new file mode 100644
index 0000000..3e7afd4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/UnreachableBlockElim.h
@@ -0,0 +1,37 @@
+//===-- UnreachableBlockElim.h - Remove unreachable blocks for codegen --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is an extremely simple version of the SimplifyCFG pass.  Its sole
+// job is to delete LLVM basic blocks that are not reachable from the entry
+// node.  To do this, it performs a simple depth first traversal of the CFG,
+// then deletes any unvisited nodes.
+//
+// Note that this pass is really a hack.  In particular, the instruction
+// selectors for various targets should just not generate code for unreachable
+// blocks.  Until LLVM has a more systematic way of defining instruction
+// selectors, however, we cannot really expect them to handle additional
+// complexity.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
+#define LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class UnreachableBlockElimPass
+    : public PassInfoMixin<UnreachableBlockElimPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
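+
+// New pass manager usage (a sketch; F and FAM are a Function and its
+// FunctionAnalysisManager):
+//   FunctionPassManager FPM;
+//   FPM.addPass(UnreachableBlockElimPass());
+//   FPM.run(F, FAM);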
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_UNREACHABLEBLOCKELIM_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h
new file mode 100644
index 0000000..d2ef4a9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.h
@@ -0,0 +1,437 @@
+//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of low-level target independent types which various
+// values in the code generator are.  This allows the target specific behavior
+// of instructions to be described to target independent passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VALUETYPES_H
+#define LLVM_CODEGEN_VALUETYPES_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+  class LLVMContext;
+  class Type;
+
+  /// Extended Value Type. Capable of holding value types which are not native
+  /// for any processor (such as the i12345 type), as well as the types an MVT
+  /// can represent.
+  struct EVT {
+  private:
+    MVT V = MVT::INVALID_SIMPLE_VALUE_TYPE;
+    Type *LLVMTy = nullptr;
+
+  public:
+    constexpr EVT() = default;
+    constexpr EVT(MVT::SimpleValueType SVT) : V(SVT) {}
+    constexpr EVT(MVT S) : V(S) {}
+
+    bool operator==(EVT VT) const {
+      return !(*this != VT);
+    }
+    bool operator!=(EVT VT) const {
+      if (V.SimpleTy != VT.V.SimpleTy)
+        return true;
+      if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return LLVMTy != VT.LLVMTy;
+      return false;
+    }
+
+    /// Returns the EVT that represents a floating-point type with the given
+    /// number of bits. There are two floating-point types with 128 bits; this
+    /// returns f128 rather than ppcf128.
+    static EVT getFloatingPointVT(unsigned BitWidth) {
+      return MVT::getFloatingPointVT(BitWidth);
+    }
+
+    /// Returns the EVT that represents an integer with the given number of
+    /// bits.
+    static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
+      MVT M = MVT::getIntegerVT(BitWidth);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+      return getExtendedIntegerVT(Context, BitWidth);
+    }
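+
+    // Illustrative example (assumes a valid LLVMContext Ctx): widths with
+    // a native MVT come back as simple types, while unusual widths fall
+    // back to an extended type.
+    //
+    // \code
+    //   EVT A = EVT::getIntegerVT(Ctx, 32);    // simple, MVT::i32
+    //   EVT B = EVT::getIntegerVT(Ctx, 12345); // extended i12345
+    //   assert(A.isSimple() && B.isExtended());
+    // \endcode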
+
+    /// Returns the EVT that represents a vector of NumElements elements,
+    /// where each element is of type VT.
+    static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
+                           bool IsScalable = false) {
+      MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+
+      assert(!IsScalable && "We don't support extended scalable types yet");
+      return getExtendedVectorVT(Context, VT, NumElements);
+    }
+
+    /// Returns the EVT that represents a vector of EC.Min elements, where
+    /// each element is of type VT.
+    static EVT getVectorVT(LLVMContext &Context, EVT VT, MVT::ElementCount EC) {
+      MVT M = MVT::getVectorVT(VT.V, EC);
+      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
+        return M;
+      assert(!EC.Scalable && "We don't support extended scalable types yet");
+      return getExtendedVectorVT(Context, VT, EC.Min);
+    }
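+
+    // For illustration (assumes a valid LLVMContext Ctx): an element count
+    // with a matching MVT yields a simple type; otherwise the result is an
+    // extended vector type.
+    //
+    // \code
+    //   EVT V4 = EVT::getVectorVT(Ctx, MVT::i32, 4); // simple, MVT::v4i32
+    //   EVT V3 = EVT::getVectorVT(Ctx, MVT::i32, 3); // extended, no v3i32 MVT
+    // \endcode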
+
+    /// Return a vector with the same number of elements as this vector, but
+    /// with the element type converted to an integer type with the same
+    /// bitwidth.
+    EVT changeVectorElementTypeToInteger() const {
+      if (!isSimple()) {
+        assert(!isScalableVector() &&
+               "We don't support extended scalable types yet");
+        return changeExtendedVectorElementTypeToInteger();
+      }
+      MVT EltTy = getSimpleVT().getVectorElementType();
+      unsigned BitWidth = EltTy.getSizeInBits();
+      MVT IntTy = MVT::getIntegerVT(BitWidth);
+      MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements(),
+                                   isScalableVector());
+      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
+             "Simple vector VT not representable by simple integer vector VT!");
+      return VecTy;
+    }
+
+    /// Return the type converted to an equivalently sized integer or vector
+    /// with integer element type. Similar to changeVectorElementTypeToInteger,
+    /// but also handles scalars.
+    EVT changeTypeToInteger() {
+      if (isVector())
+        return changeVectorElementTypeToInteger();
+
+      if (isSimple())
+        return MVT::getIntegerVT(getSizeInBits());
+
+      return changeExtendedTypeToInteger();
+    }
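+
+    // For illustration: scalars map to an integer of the same width and
+    // vectors keep their element count.
+    //
+    // \code
+    //   EVT(MVT::f64).changeTypeToInteger();   // MVT::i64
+    //   EVT(MVT::v4f32).changeTypeToInteger(); // MVT::v4i32
+    // \endcode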
+
+    /// Test if the given EVT is simple (as opposed to being extended).
+    bool isSimple() const {
+      return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
+    }
+
+    /// Test if the given EVT is extended (as opposed to being simple).
+    bool isExtended() const {
+      return !isSimple();
+    }
+
+    /// Return true if this is a FP or a vector FP type.
+    bool isFloatingPoint() const {
+      return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
+    }
+
+    /// Return true if this is an integer or a vector integer type.
+    bool isInteger() const {
+      return isSimple() ? V.isInteger() : isExtendedInteger();
+    }
+
+    /// Return true if this is an integer, but not a vector.
+    bool isScalarInteger() const {
+      return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
+    }
+
+    /// Return true if this is a vector value type.
+    bool isVector() const {
+      return isSimple() ? V.isVector() : isExtendedVector();
+    }
+
+    /// Return true if this is a vector type where the runtime
+    /// length is machine dependent.
+    bool isScalableVector() const {
+      // FIXME: We don't support extended scalable types yet, because the
+      // matching IR type doesn't exist. Once it has been added, this can
+      // be changed to call isExtendedScalableVector.
+      if (!isSimple())
+        return false;
+      return V.isScalableVector();
+    }
+
+    /// Return true if this is a 16-bit vector type.
+    bool is16BitVector() const {
+      return isSimple() ? V.is16BitVector() : isExtended16BitVector();
+    }
+
+    /// Return true if this is a 32-bit vector type.
+    bool is32BitVector() const {
+      return isSimple() ? V.is32BitVector() : isExtended32BitVector();
+    }
+
+    /// Return true if this is a 64-bit vector type.
+    bool is64BitVector() const {
+      return isSimple() ? V.is64BitVector() : isExtended64BitVector();
+    }
+
+    /// Return true if this is a 128-bit vector type.
+    bool is128BitVector() const {
+      return isSimple() ? V.is128BitVector() : isExtended128BitVector();
+    }
+
+    /// Return true if this is a 256-bit vector type.
+    bool is256BitVector() const {
+      return isSimple() ? V.is256BitVector() : isExtended256BitVector();
+    }
+
+    /// Return true if this is a 512-bit vector type.
+    bool is512BitVector() const {
+      return isSimple() ? V.is512BitVector() : isExtended512BitVector();
+    }
+
+    /// Return true if this is a 1024-bit vector type.
+    bool is1024BitVector() const {
+      return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
+    }
+
+    /// Return true if this is a 2048-bit vector type.
+    bool is2048BitVector() const {
+      return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
+    }
+
+    /// Return true if this is an overloaded type for TableGen.
+    bool isOverloaded() const {
+      return V == MVT::iAny || V == MVT::fAny || V == MVT::vAny ||
+             V == MVT::iPTRAny;
+    }
+
+    /// Return true if the bit size is a multiple of 8.
+    bool isByteSized() const {
+      return (getSizeInBits() & 7) == 0;
+    }
+
+    /// Return true if the size is a power-of-two number of bytes.
+    bool isRound() const {
+      unsigned BitSize = getSizeInBits();
+      return BitSize >= 8 && !(BitSize & (BitSize - 1));
+    }
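+
+    // For example, i32 is both byte-sized and round, i24 is byte-sized
+    // (3 bytes) but not round, and i1 is neither.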
+
+    /// Return true if this has the same number of bits as VT.
+    bool bitsEq(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() == VT.getSizeInBits();
+    }
+
+    /// Return true if this has more bits than VT.
+    bool bitsGT(EVT VT) const {
+      if (EVT::operator==(VT)) return false;
+      return getSizeInBits() > VT.getSizeInBits();
+    }
+
+    /// Return true if this has no fewer bits than VT.
+    bool bitsGE(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() >= VT.getSizeInBits();
+    }
+
+    /// Return true if this has fewer bits than VT.
+    bool bitsLT(EVT VT) const {
+      if (EVT::operator==(VT)) return false;
+      return getSizeInBits() < VT.getSizeInBits();
+    }
+
+    /// Return true if this has no more bits than VT.
+    bool bitsLE(EVT VT) const {
+      if (EVT::operator==(VT)) return true;
+      return getSizeInBits() <= VT.getSizeInBits();
+    }
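+
+    // Note that the bits* predicates compare total widths only; e.g.
+    // EVT(MVT::i32).bitsLT(MVT::i64) is true, and the distinct 128-bit
+    // types f128 and v4i32 satisfy bitsEq.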
+
+    /// Return the SimpleValueType held in the specified simple EVT.
+    MVT getSimpleVT() const {
+      assert(isSimple() && "Expected a SimpleValueType!");
+      return V;
+    }
+
+    /// If this is a vector type, return the element type, otherwise return
+    /// this.
+    EVT getScalarType() const {
+      return isVector() ? getVectorElementType() : *this;
+    }
+
+    /// Given a vector type, return the type of each element.
+    EVT getVectorElementType() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorElementType();
+      return getExtendedVectorElementType();
+    }
+
+    /// Given a vector type, return the number of elements it contains.
+    unsigned getVectorNumElements() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorNumElements();
+      return getExtendedVectorNumElements();
+    }
+
+    /// Given a (possibly scalable) vector type, return its ElementCount.
+    MVT::ElementCount getVectorElementCount() const {
+      assert(isVector() && "Invalid vector type!");
+      if (isSimple())
+        return V.getVectorElementCount();
+
+      assert(!isScalableVector() &&
+             "We don't support extended scalable types yet");
+      return {getExtendedVectorNumElements(), false};
+    }
+
+    /// Return the size of the specified value type in bits.
+    unsigned getSizeInBits() const {
+      if (isSimple())
+        return V.getSizeInBits();
+      return getExtendedSizeInBits();
+    }
+
+    unsigned getScalarSizeInBits() const {
+      return getScalarType().getSizeInBits();
+    }
+
+    /// Return the number of bytes overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSize() const {
+      return (getSizeInBits() + 7) / 8;
+    }
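+
+    // For example, i1 stores as 1 byte and i65 as 9 bytes, since the bit
+    // width is rounded up to whole bytes.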
+
+    /// Return the number of bits overwritten by a store of the specified value
+    /// type.
+    unsigned getStoreSizeInBits() const {
+      return getStoreSize() * 8;
+    }
+
+    /// Rounds the bit-width of the given integer EVT up to the nearest power of
+    /// two (and at least to eight), and returns the integer EVT with that
+    /// number of bits.
+    EVT getRoundIntegerType(LLVMContext &Context) const {
+      assert(isInteger() && !isVector() && "Invalid integer type!");
+      unsigned BitWidth = getSizeInBits();
+      if (BitWidth <= 8)
+        return EVT(MVT::i8);
+      return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
+    }
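+
+    // For example (assumes a valid LLVMContext Ctx):
+    //   EVT(MVT::i1).getRoundIntegerType(Ctx) yields i8, and
+    //   EVT(MVT::i17).getRoundIntegerType(Ctx) yields i32.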
+
+    /// Finds the smallest simple value type that is greater than or equal to
+    /// half the width of this EVT. If no simple value type can be found, an
+    /// extended integer value type of half the size (rounded up) is returned.
+    EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
+      assert(isInteger() && !isVector() && "Invalid integer type!");
+      unsigned EVTSize = getSizeInBits();
+      for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
+          IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
+        EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
+        if (HalfVT.getSizeInBits() * 2 >= EVTSize)
+          return HalfVT;
+      }
+      return getIntegerVT(Context, (EVTSize + 1) / 2);
+    }
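+
+    // For example, i64 yields i32, and i48 also yields i32 (the smallest
+    // simple integer type holding at least 24 bits).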
+
+    /// Return a VT for an integer vector type with the size of the
+    /// elements doubled. The type returned may be an extended type.
+    EVT widenIntegerVectorElementType(LLVMContext &Context) const {
+      EVT EltVT = getVectorElementType();
+      EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
+      return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
+    }
+
+    /// Return a VT for a vector type with the same element type but
+    /// half the number of elements. The type returned may be an
+    /// extended type.
+    EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
+      EVT EltVT = getVectorElementType();
+      auto EltCnt = getVectorElementCount();
+      assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!");
+      return EVT::getVectorVT(Context, EltVT, EltCnt / 2);
+    }
+
+    /// Returns true if the vector's number of elements is a power of 2.
+    bool isPow2VectorType() const {
+      unsigned NElts = getVectorNumElements();
+      return !(NElts & (NElts - 1));
+    }
+
+    /// Widens the length of the given vector EVT up to the nearest power of 2
+    /// and returns that type.
+    EVT getPow2VectorType(LLVMContext &Context) const {
+      if (!isPow2VectorType()) {
+        unsigned NElts = getVectorNumElements();
+        unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+        return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts,
+                                isScalableVector());
+      }
+      return *this;
+    }
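+
+    // For illustration (assumes a valid LLVMContext Ctx): an extended
+    // 3-element vector widens to the next power of two.
+    //
+    // \code
+    //   EVT V3 = EVT::getVectorVT(Ctx, MVT::i32, 3); // extended v3i32
+    //   EVT V4 = V3.getPow2VectorType(Ctx);          // simple MVT::v4i32
+    // \endcode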
+
+    /// This function returns the value type as a string, e.g. "i32".
+    std::string getEVTString() const;
+
+    /// This method returns an LLVM type corresponding to the specified EVT.
+    /// For integer types, this returns an unsigned type. Note that this will
+    /// abort for types that cannot be represented.
+    Type *getTypeForEVT(LLVMContext &Context) const;
+
+    /// Return the value type corresponding to the specified type.
+    /// This returns all pointers as iPTR.  If HandleUnknown is true, unknown
+    /// types are returned as Other, otherwise they are invalid.
+    static EVT getEVT(Type *Ty, bool HandleUnknown = false);
+
+    intptr_t getRawBits() const {
+      if (isSimple())
+        return V.SimpleTy;
+      else
+        return (intptr_t)(LLVMTy);
+    }
+
+    /// A meaningless but well-behaved order, useful for constructing
+    /// containers.
+    struct compareRawBits {
+      bool operator()(EVT L, EVT R) const {
+        if (L.V.SimpleTy == R.V.SimpleTy)
+          return L.LLVMTy < R.LLVMTy;
+        else
+          return L.V.SimpleTy < R.V.SimpleTy;
+      }
+    };
+
+  private:
+    // Methods for handling the Extended-type case in functions above.
+    // These are all out-of-line to prevent users of this header file
+    // from having a dependency on Type.h.
+    EVT changeExtendedTypeToInteger() const;
+    EVT changeExtendedVectorElementTypeToInteger() const;
+    static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
+    static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
+                                   unsigned NumElements);
+    bool isExtendedFloatingPoint() const LLVM_READONLY;
+    bool isExtendedInteger() const LLVM_READONLY;
+    bool isExtendedScalarInteger() const LLVM_READONLY;
+    bool isExtendedVector() const LLVM_READONLY;
+    bool isExtended16BitVector() const LLVM_READONLY;
+    bool isExtended32BitVector() const LLVM_READONLY;
+    bool isExtended64BitVector() const LLVM_READONLY;
+    bool isExtended128BitVector() const LLVM_READONLY;
+    bool isExtended256BitVector() const LLVM_READONLY;
+    bool isExtended512BitVector() const LLVM_READONLY;
+    bool isExtended1024BitVector() const LLVM_READONLY;
+    bool isExtended2048BitVector() const LLVM_READONLY;
+    EVT getExtendedVectorElementType() const;
+    unsigned getExtendedVectorNumElements() const LLVM_READONLY;
+    unsigned getExtendedSizeInBits() const LLVM_READONLY;
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_VALUETYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
new file mode 100644
index 0000000..673eec9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/ValueTypes.td
@@ -0,0 +1,169 @@
+//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Value types - These values correspond to the MVT::SimpleValueType enum
+// defined in llvm/Support/MachineValueType.h.  If you update anything here,
+// you must update it there as well!
+//
+//===----------------------------------------------------------------------===//
+
+class ValueType<int size, int value> {
+  string Namespace = "MVT";
+  int Size = size;
+  int Value = value;
+}
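+
+// Illustrative sketch (hypothetical target, not part of this file): a
+// backend's register class definitions reference these records by name, e.g.
+//
+//   def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0, R1)>;
+//
+// The Size field is the type's bit width; the Value field must stay in
+// sync with the MVT::SimpleValueType enum.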
+
+def OtherVT: ValueType<0  ,  1>;   // "Other" value
+def i1     : ValueType<1  ,  2>;   // One bit boolean value
+def i8     : ValueType<8  ,  3>;   // 8-bit integer value
+def i16    : ValueType<16 ,  4>;   // 16-bit integer value
+def i32    : ValueType<32 ,  5>;   // 32-bit integer value
+def i64    : ValueType<64 ,  6>;   // 64-bit integer value
+def i128   : ValueType<128,  7>;   // 128-bit integer value
+def f16    : ValueType<16 ,  8>;   // 16-bit floating point value
+def f32    : ValueType<32 ,  9>;   // 32-bit floating point value
+def f64    : ValueType<64 , 10>;   // 64-bit floating point value
+def f80    : ValueType<80 , 11>;   // 80-bit floating point value
+def f128   : ValueType<128, 12>;   // 128-bit floating point value
+def ppcf128: ValueType<128, 13>;   // PPC 128-bit floating point value
+
+def v1i1   : ValueType<1 ,  14>;   //   1 x i1 vector value
+def v2i1   : ValueType<2 ,  15>;   //   2 x i1 vector value
+def v4i1   : ValueType<4 ,  16>;   //   4 x i1 vector value
+def v8i1   : ValueType<8 ,  17>;   //   8 x i1 vector value
+def v16i1  : ValueType<16,  18>;   //  16 x i1 vector value
+def v32i1  : ValueType<32 , 19>;   //  32 x i1 vector value
+def v64i1  : ValueType<64 , 20>;   //  64 x i1 vector value
+def v128i1 : ValueType<128, 21>;   // 128 x i1 vector value
+def v512i1 : ValueType<512, 22>;   // 512 x i1 vector value
+def v1024i1: ValueType<1024,23>;   //1024 x i1 vector value
+
+def v1i8   : ValueType<8,   24>;   //  1 x i8  vector value
+def v2i8   : ValueType<16 , 25>;   //  2 x i8  vector value
+def v4i8   : ValueType<32 , 26>;   //  4 x i8  vector value
+def v8i8   : ValueType<64 , 27>;   //  8 x i8  vector value
+def v16i8  : ValueType<128, 28>;   // 16 x i8  vector value
+def v32i8  : ValueType<256, 29>;   // 32 x i8  vector value
+def v64i8  : ValueType<512, 30>;   // 64 x i8  vector value
+def v128i8 : ValueType<1024,31>;   //128 x i8  vector value
+def v256i8 : ValueType<2048,32>;   //256 x i8  vector value
+
+def v1i16  : ValueType<16 , 33>;   //  1 x i16 vector value
+def v2i16  : ValueType<32 , 34>;   //  2 x i16 vector value
+def v4i16  : ValueType<64 , 35>;   //  4 x i16 vector value
+def v8i16  : ValueType<128, 36>;   //  8 x i16 vector value
+def v16i16 : ValueType<256, 37>;   // 16 x i16 vector value
+def v32i16 : ValueType<512, 38>;   // 32 x i16 vector value
+def v64i16 : ValueType<1024,39>;   // 64 x i16 vector value
+def v128i16: ValueType<2048,40>;   //128 x i16 vector value
+
+def v1i32  : ValueType<32 , 41>;   //  1 x i32 vector value
+def v2i32  : ValueType<64 , 42>;   //  2 x i32 vector value
+def v4i32  : ValueType<128, 43>;   //  4 x i32 vector value
+def v8i32  : ValueType<256, 44>;   //  8 x i32 vector value
+def v16i32 : ValueType<512, 45>;   // 16 x i32 vector value
+def v32i32 : ValueType<1024,46>;   // 32 x i32 vector value
+def v64i32 : ValueType<2048,47>;   // 64 x i32 vector value
+
+def v1i64  : ValueType<64 , 48>;   //  1 x i64 vector value
+def v2i64  : ValueType<128, 49>;   //  2 x i64 vector value
+def v4i64  : ValueType<256, 50>;   //  4 x i64 vector value
+def v8i64  : ValueType<512, 51>;   //  8 x i64 vector value
+def v16i64 : ValueType<1024,52>;   // 16 x i64 vector value
+def v32i64 : ValueType<2048,53>;   // 32 x i64 vector value
+
+def v1i128 : ValueType<128, 54>;   //  1 x i128 vector value
+
+def nxv1i1  : ValueType<1,   55>;  // n x  1 x i1  vector value
+def nxv2i1  : ValueType<2,   56>;  // n x  2 x i1  vector value
+def nxv4i1  : ValueType<4,   57>;  // n x  4 x i1  vector value
+def nxv8i1  : ValueType<8,   58>;  // n x  8 x i1  vector value
+def nxv16i1 : ValueType<16,  59>;  // n x 16 x i1  vector value
+def nxv32i1 : ValueType<32,  60>;  // n x 32 x i1  vector value
+
+def nxv1i8  : ValueType<8,   61>;  // n x  1 x i8  vector value
+def nxv2i8  : ValueType<16,  62>;  // n x  2 x i8  vector value
+def nxv4i8  : ValueType<32,  63>;  // n x  4 x i8  vector value
+def nxv8i8  : ValueType<64,  64>;  // n x  8 x i8  vector value
+def nxv16i8 : ValueType<128, 65>;  // n x 16 x i8  vector value
+def nxv32i8 : ValueType<256, 66>;  // n x 32 x i8  vector value
+
+def nxv1i16 : ValueType<16,  67>;  // n x  1 x i16 vector value
+def nxv2i16 : ValueType<32,  68>;  // n x  2 x i16 vector value
+def nxv4i16 : ValueType<64,  69>;  // n x  4 x i16 vector value
+def nxv8i16 : ValueType<128, 70>;  // n x  8 x i16 vector value
+def nxv16i16: ValueType<256, 71>;  // n x 16 x i16 vector value
+def nxv32i16: ValueType<512, 72>;  // n x 32 x i16 vector value
+
+def nxv1i32 : ValueType<32,  73>;  // n x  1 x i32 vector value
+def nxv2i32 : ValueType<64,  74>;  // n x  2 x i32 vector value
+def nxv4i32 : ValueType<128, 75>;  // n x  4 x i32 vector value
+def nxv8i32 : ValueType<256, 76>;  // n x  8 x i32 vector value
+def nxv16i32: ValueType<512, 77>;  // n x 16 x i32 vector value
+def nxv32i32: ValueType<1024,78>;  // n x 32 x i32 vector value
+
+def nxv1i64 : ValueType<64,  79>;  // n x  1 x i64 vector value
+def nxv2i64 : ValueType<128, 80>;  // n x  2 x i64 vector value
+def nxv4i64 : ValueType<256, 81>;  // n x  4 x i64 vector value
+def nxv8i64 : ValueType<512, 82>;  // n x  8 x i64 vector value
+def nxv16i64: ValueType<1024,83>;  // n x 16 x i64 vector value
+def nxv32i64: ValueType<2048,84>;  // n x 32 x i64 vector value
+
+def v2f16  : ValueType<32 , 85>;   //  2 x f16 vector value
+def v4f16  : ValueType<64 , 86>;   //  4 x f16 vector value
+def v8f16  : ValueType<128, 87>;   //  8 x f16 vector value
+def v1f32  : ValueType<32 , 88>;   //  1 x f32 vector value
+def v2f32  : ValueType<64 , 89>;   //  2 x f32 vector value
+def v4f32  : ValueType<128, 90>;   //  4 x f32 vector value
+def v8f32  : ValueType<256, 91>;   //  8 x f32 vector value
+def v16f32 : ValueType<512, 92>;   // 16 x f32 vector value
+def v1f64  : ValueType<64,  93>;   //  1 x f64 vector value
+def v2f64  : ValueType<128, 94>;   //  2 x f64 vector value
+def v4f64  : ValueType<256, 95>;   //  4 x f64 vector value
+def v8f64  : ValueType<512, 96>;   //  8 x f64 vector value
+
+def nxv2f16  : ValueType<32 ,  97>; // n x  2 x f16 vector value
+def nxv4f16  : ValueType<64 ,  98>; // n x  4 x f16 vector value
+def nxv8f16  : ValueType<128,  99>; // n x  8 x f16 vector value
+def nxv1f32  : ValueType<32 , 100>; // n x  1 x f32 vector value
+def nxv2f32  : ValueType<64 , 101>; // n x  2 x f32 vector value
+def nxv4f32  : ValueType<128, 102>; // n x  4 x f32 vector value
+def nxv8f32  : ValueType<256, 103>; // n x  8 x f32 vector value
+def nxv16f32 : ValueType<512, 104>; // n x 16 x f32 vector value
+def nxv1f64  : ValueType<64,  105>; // n x  1 x f64 vector value
+def nxv2f64  : ValueType<128, 106>; // n x  2 x f64 vector value
+def nxv4f64  : ValueType<256, 107>; // n x  4 x f64 vector value
+def nxv8f64  : ValueType<512, 108>; // n x  8 x f64 vector value
+
+def x86mmx : ValueType<64 , 109>;   // X86 MMX value
+def FlagVT : ValueType<0  , 110>;   // Pre-RA sched glue
+def isVoid : ValueType<0  , 111>;   // Produces no value
+def untyped: ValueType<8  , 112>;   // Produces an untyped value
+def ExceptRef: ValueType<0, 113>;   // WebAssembly's except_ref type
+def token  : ValueType<0  , 248>;   // TokenTy
+def MetadataVT: ValueType<0, 249>;  // Metadata
+
+// Pseudo valuetype mapped to the current pointer size to any address space.
+// Should only be used in TableGen.
+def iPTRAny   : ValueType<0, 250>;
+
+// Pseudo valuetype to represent "vector of any size"
+def vAny   : ValueType<0  , 251>;
+
+// Pseudo valuetype to represent "float of any format"
+def fAny   : ValueType<0  , 252>;
+
+// Pseudo valuetype to represent "integer of any bit width"
+def iAny   : ValueType<0  , 253>;
+
+// Pseudo valuetype mapped to the current pointer size.
+def iPTR   : ValueType<0  , 254>;
+
+// Pseudo valuetype to represent "any type of any size".
+def Any    : ValueType<0  , 255>;
diff --git a/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
new file mode 100644
index 0000000..3b06f03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/VirtRegMap.h
@@ -0,0 +1,188 @@
+//===- llvm/CodeGen/VirtRegMap.h - Virtual Register Map ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a virtual register map. This maps virtual registers to
+// physical registers and virtual registers to stack slots. It is created and
+// updated by a register allocator and then used by a machine code rewriter that
+// adds spill code and rewrites virtual into physical register references.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VIRTREGMAP_H
+#define LLVM_CODEGEN_VIRTREGMAP_H
+
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include <cassert>
+
+namespace llvm {
+
+class MachineFunction;
+class MachineRegisterInfo;
+class raw_ostream;
+class TargetInstrInfo;
+
+  class VirtRegMap : public MachineFunctionPass {
+  public:
+    enum {
+      NO_PHYS_REG = 0,
+      NO_STACK_SLOT = (1L << 30) - 1,
+      MAX_STACK_SLOT = (1L << 18) - 1
+    };
+
+  private:
+    MachineRegisterInfo *MRI;
+    const TargetInstrInfo *TII;
+    const TargetRegisterInfo *TRI;
+    MachineFunction *MF;
+
+    /// Virt2PhysMap - This is a virtual to physical register
+    /// mapping. Each virtual register is required to have an entry in
+    /// it; even spilled virtual registers (the register mapped to a
+    /// spilled register is the temporary used to load it from the
+    /// stack).
+    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
+
+    /// Virt2StackSlotMap - This is a virtual register to stack slot
+    /// mapping. Each spilled virtual register has an entry in it
+    /// which corresponds to the stack slot this register is spilled
+    /// at.
+    IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+
+    /// Virt2SplitMap - This is a virtual register to split virtual register
+    /// mapping.
+    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
+
+    /// createSpillSlot - Allocate a spill slot for RC from MFI.
+    unsigned createSpillSlot(const TargetRegisterClass *RC);
+
+  public:
+    static char ID;
+
+    VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
+                   Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) {}
+    VirtRegMap(const VirtRegMap &) = delete;
+    VirtRegMap &operator=(const VirtRegMap &) = delete;
+
+    bool runOnMachineFunction(MachineFunction &MF) override;
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      AU.setPreservesAll();
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
+
+    MachineFunction &getMachineFunction() const {
+      assert(MF && "getMachineFunction called before runOnMachineFunction");
+      return *MF;
+    }
+
+    MachineRegisterInfo &getRegInfo() const { return *MRI; }
+    const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }
+
+    void grow();
+
+    /// @brief returns true if the specified virtual register is
+    /// mapped to a physical register
+    bool hasPhys(unsigned virtReg) const {
+      return getPhys(virtReg) != NO_PHYS_REG;
+    }
+
+    /// @brief returns the physical register mapped to the specified
+    /// virtual register
+    unsigned getPhys(unsigned virtReg) const {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      return Virt2PhysMap[virtReg];
+    }
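+
+    // Typical query sequence in a rewriter (illustrative sketch, assuming
+    // VRM is a populated VirtRegMap and Reg is a virtual register):
+    //
+    // \code
+    //   if (VRM.hasPhys(Reg)) {
+    //     unsigned PhysReg = VRM.getPhys(Reg); // rewrite Reg to PhysReg
+    //   } else if (VRM.getStackSlot(Reg) != VirtRegMap::NO_STACK_SLOT) {
+    //     int FI = VRM.getStackSlot(Reg);      // insert spill/reload code
+    //   }
+    // \endcode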
+
+    /// @brief creates a mapping for the specified virtual register to
+    /// the specified physical register
+    void assignVirt2Phys(unsigned virtReg, MCPhysReg physReg);
+
+    /// @brief clears the specified virtual register's physical
+    /// register mapping
+    void clearVirt(unsigned virtReg) {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
+             "attempt to clear a not assigned virtual register");
+      Virt2PhysMap[virtReg] = NO_PHYS_REG;
+    }
+
+    /// @brief clears all virtual to physical register mappings
+    void clearAllVirt() {
+      Virt2PhysMap.clear();
+      grow();
+    }
+
+    /// @brief returns true if VirtReg is assigned to its preferred physreg.
+    bool hasPreferredPhys(unsigned VirtReg);
+
+    /// @brief returns true if VirtReg has a known preferred register.
+    /// This returns false if VirtReg has a preference that is a virtual
+    /// register that hasn't been assigned yet.
+    bool hasKnownPreference(unsigned VirtReg);
+
+    /// @brief records that virtReg is a live interval split from SReg.
+    void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
+      Virt2SplitMap[virtReg] = SReg;
+    }
+
+    /// @brief returns the live interval virtReg is split from.
+    unsigned getPreSplitReg(unsigned virtReg) const {
+      return Virt2SplitMap[virtReg];
+    }
+
+    /// getOriginal - Return the original virtual register that VirtReg descends
+    /// from through splitting.
+    /// A register that was not created by splitting is its own original.
+    /// This operation is idempotent.
+    unsigned getOriginal(unsigned VirtReg) const {
+      unsigned Orig = getPreSplitReg(VirtReg);
+      return Orig ? Orig : VirtReg;
+    }
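+
+    // For example, if %2 was split from %1 and %3 was in turn split from
+    // %2, a splitter that records the original at each step makes
+    // getOriginal return %1 for all three registers.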
+
+    /// @brief returns true if the specified virtual register is not
+    /// mapped to a stack slot or rematerialized.
+    bool isAssignedReg(unsigned virtReg) const {
+      if (getStackSlot(virtReg) == NO_STACK_SLOT)
+        return true;
+      // Split register can be assigned a physical register as well as a
+      // stack slot or remat id.
+      return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
+    }
+
+    /// @brief returns the stack slot mapped to the specified virtual
+    /// register
+    int getStackSlot(unsigned virtReg) const {
+      assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+      return Virt2StackSlotMap[virtReg];
+    }
+
+    /// @brief create a mapping for the specified virtual register to
+    /// the next available stack slot
+    int assignVirt2StackSlot(unsigned virtReg);
+
+    /// @brief create a mapping for the specified virtual register to
+    /// the specified stack slot
+    void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
+
+    void print(raw_ostream &OS, const Module* M = nullptr) const override;
+    void dump() const;
+  };
+
+  inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
+    VRM.print(OS);
+    return OS;
+  }
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_VIRTREGMAP_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h b/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h
new file mode 100644
index 0000000..8043024
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/WinEHFuncInfo.h
@@ -0,0 +1,129 @@
+//===- llvm/CodeGen/WinEHFuncInfo.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures and associated state for Windows exception handling schemes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_WINEHFUNCINFO_H
+#define LLVM_CODEGEN_WINEHFUNCINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class FuncletPadInst;
+class Function;
+class GlobalVariable;
+class Instruction;
+class InvokeInst;
+class MachineBasicBlock;
+class MCSymbol;
+
+// The following structs represent the .xdata tables for various
+// Windows-related EH personalities.
+
+using MBBOrBasicBlock = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
+
+struct CxxUnwindMapEntry {
+  int ToState;
+  MBBOrBasicBlock Cleanup;
+};
+
+/// Similar to CxxUnwindMapEntry, but supports SEH filters.
+struct SEHUnwindMapEntry {
+  /// If unwinding continues through this handler, transition to the handler at
+  /// this state. This indexes into SEHUnwindMap.
+  int ToState = -1;
+
+  bool IsFinally = false;
+
+  /// Holds the filter expression function.
+  const Function *Filter = nullptr;
+
+  /// Holds the __except or __finally basic block.
+  MBBOrBasicBlock Handler;
+};
+
+struct WinEHHandlerType {
+  int Adjectives;
+  /// The CatchObj starts out life as an LLVM alloca and is eventually turned
+  /// into a frame index.
+  union {
+    const AllocaInst *Alloca;
+    int FrameIndex;
+  } CatchObj = {};
+  GlobalVariable *TypeDescriptor;
+  MBBOrBasicBlock Handler;
+};
+
+struct WinEHTryBlockMapEntry {
+  int TryLow = -1;
+  int TryHigh = -1;
+  int CatchHigh = -1;
+  SmallVector<WinEHHandlerType, 1> HandlerArray;
+};
+
+enum class ClrHandlerType { Catch, Finally, Fault, Filter };
+
+struct ClrEHUnwindMapEntry {
+  MBBOrBasicBlock Handler;
+  uint32_t TypeToken;
+  int HandlerParentState; ///< Outer handler enclosing this entry's handler
+  int TryParentState; ///< Outer try region enclosing this entry's try region,
+                      ///< treating later catches on same try as "outer"
+  ClrHandlerType HandlerType;
+};
+
+struct WinEHFuncInfo {
+  DenseMap<const Instruction *, int> EHPadStateMap;
+  DenseMap<const FuncletPadInst *, int> FuncletBaseStateMap;
+  DenseMap<const InvokeInst *, int> InvokeStateMap;
+  DenseMap<MCSymbol *, std::pair<int, MCSymbol *>> LabelToStateMap;
+  SmallVector<CxxUnwindMapEntry, 4> CxxUnwindMap;
+  SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
+  SmallVector<SEHUnwindMapEntry, 4> SEHUnwindMap;
+  SmallVector<ClrEHUnwindMapEntry, 4> ClrEHUnwindMap;
+  int UnwindHelpFrameIdx = std::numeric_limits<int>::max();
+  int PSPSymFrameIdx = std::numeric_limits<int>::max();
+
+  int getLastStateNumber() const { return CxxUnwindMap.size() - 1; }
+
+  void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin,
+                         MCSymbol *InvokeEnd);
+
+  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
+  int EHRegNodeEndOffset = std::numeric_limits<int>::max();
+  int EHGuardFrameIndex = std::numeric_limits<int>::max();
+  int SEHSetFrameOffset = std::numeric_limits<int>::max();
+
+  WinEHFuncInfo();
+};
+
+/// Analyze the IR in ParentFn and its handlers to build WinEHFuncInfo, which
+/// describes the state numbers and tables used by __CxxFrameHandler3. This
+/// analysis assumes that WinEHPrepare has already been run.
+void calculateWinCXXEHStateNumbers(const Function *ParentFn,
+                                   WinEHFuncInfo &FuncInfo);
+
+void calculateSEHStateNumbers(const Function *ParentFn,
+                              WinEHFuncInfo &FuncInfo);
+
+void calculateClrEHStateNumbers(const Function *Fn, WinEHFuncInfo &FuncInfo);
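+
+// Illustrative usage sketch (assumes F is a Function with a C++ EH
+// personality and WinEHPrepare has already run):
+//
+// \code
+//   WinEHFuncInfo FuncInfo;
+//   calculateWinCXXEHStateNumbers(&F, FuncInfo);
+//   int NumStates = FuncInfo.getLastStateNumber() + 1;
+// \endcode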
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_WINEHFUNCINFO_H