Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
new file mode 100644
index 0000000..8d91cc4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -0,0 +1,215 @@
+//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/TargetCallingConv.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class MachineIRBuilder;
+class MachineOperand;
+struct MachinePointerInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Type;
+class Value;
+
+class CallLowering {
+  const TargetLowering *TLI;
+
+public:
+  struct ArgInfo {
+    unsigned Reg;
+    Type *Ty;
+    ISD::ArgFlagsTy Flags;
+    bool IsFixed;
+
+    ArgInfo(unsigned Reg, Type *Ty, ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{},
+            bool IsFixed = true)
+        : Reg(Reg), Ty(Ty), Flags(Flags), IsFixed(IsFixed) {}
+  };
+
+  /// Argument handling is mostly uniform between the four places that
+  /// make these decisions: function formal arguments, call
+  /// instruction args, call instruction returns and function
+  /// returns. However, once a decision has been made on where an
+  /// argument should go, exactly what happens can vary slightly. This
+  /// class abstracts the differences.
+  struct ValueHandler {
+    ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                 CCAssignFn *AssignFn)
+      : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn) {}
+
+    virtual ~ValueHandler() = default;
+
+    /// Materialize a VReg containing the address of the specified
+    /// stack-based object. This is either based on a FrameIndex or
+    /// direct SP manipulation, depending on the context. \p MPO
+    /// should be initialized to an appropriate description of the
+    /// address created.
+    virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+                                     MachinePointerInfo &MPO) = 0;
+
+    /// The specified value has been assigned to a physical register,
+    /// handle the appropriate COPY (either to or from) and mark any
+    /// relevant uses/defines as needed.
+    virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+                                  CCValAssign &VA) = 0;
+
+    /// The specified value has been assigned to a stack
+    /// location. Load or store it there, with appropriate extension
+    /// if necessary.
+    virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+                                      uint64_t Size, MachinePointerInfo &MPO,
+                                      CCValAssign &VA) = 0;
+
+    /// Handle custom values, which may be passed into one or more of \p VAs.
+    /// \return The number of \p VAs that have been assigned after the first
+    ///         one, and which should therefore be skipped from further
+    ///         processing.
+    virtual unsigned assignCustomValue(const ArgInfo &Arg,
+                                       ArrayRef<CCValAssign> VAs) {
+      // This is not a pure virtual method because not all targets need to worry
+      // about custom values.
+      llvm_unreachable("Custom values not supported");
+    }
+
+    /// Extend \p ValReg as indicated by \p VA's LocInfo and return the vreg
+    /// holding the extended value.
+    unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+
+    virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                           CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
+                           CCState &State) {
+      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+    }
+
+    MachineIRBuilder &MIRBuilder;
+    MachineRegisterInfo &MRI;
+    CCAssignFn *AssignFn;
+  };
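+
+  // A minimal sketch (not part of the upstream header) of how a target might
+  // subclass ValueHandler for incoming formal arguments. The class name and
+  // method bodies are illustrative assumptions, not any target's actual code:
+  //
+  //   struct MyIncomingArgHandler : public CallLowering::ValueHandler {
+  //     MyIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+  //                          CCAssignFn *AssignFn)
+  //         : ValueHandler(B, MRI, AssignFn) {}
+  //
+  //     void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+  //                           CCValAssign &VA) override {
+  //       // Incoming values arrive in physregs: mark them live-in and copy
+  //       // them into the value's vreg.
+  //       MIRBuilder.getMBB().addLiveIn(PhysReg);
+  //       MIRBuilder.buildCopy(ValVReg, PhysReg);
+  //     }
+  //     // getStackAddress/assignValueToAddress would create a frame index
+  //     // for the incoming stack slot and load from it (details omitted).
+  //   };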
+
+protected:
+  /// Getter for generic TargetLowering class.
+  const TargetLowering *getTLI() const {
+    return TLI;
+  }
+
+  /// Getter for target specific TargetLowering class.
+  template <class XXXTargetLowering>
+    const XXXTargetLowering *getTLI() const {
+    return static_cast<const XXXTargetLowering *>(TLI);
+  }
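+
+  // For example (hypothetical target class name), target code would
+  // typically do:
+  //   const MyTargetLowering *TLI = getTLI<MyTargetLowering>();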
+
+  template <typename FuncInfoTy>
+  void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+                   const FuncInfoTy &FuncInfo) const;
+
+  /// Invoke ValueHandler::assignArg on each of the given \p Args and then use
+  /// \p Callback to move them to the assigned locations.
+  ///
+  /// \return True if everything has succeeded, false otherwise.
+  bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
+                         ValueHandler &Callback) const;
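+
+  // A hedged sketch of the typical call sequence inside a target's
+  // lowerFormalArguments; SplitArgs, AssignFn and MyIncomingArgHandler are
+  // illustrative names:
+  //
+  //   SmallVector<ArgInfo, 8> SplitArgs;
+  //   // ... build one ArgInfo per formal argument, applying setArgFlags ...
+  //   MyIncomingArgHandler Handler(MIRBuilder, MRI, AssignFn);
+  //   if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
+  //     return false;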
+
+public:
+  CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
+  virtual ~CallLowering() = default;
+
+  /// This hook must be implemented to lower outgoing return values, described
+  /// by \p Val, into the specified virtual register \p VReg.
+  /// This hook is used by GlobalISel.
+  ///
+  /// \return True if the lowering succeeds, false otherwise.
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
+                           const Value *Val, unsigned VReg) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the incoming (formal)
+  /// arguments, described by \p Args, for GlobalISel. Each argument
+  /// must end up in the related virtual register described by VRegs.
+  /// In other words, the first argument should end up in VRegs[0],
+  /// the second in VRegs[1], and so on.
+  /// \p MIRBuilder is set to the proper insertion point for the argument
+  /// lowering.
+  ///
+  /// \return True if the lowering succeeded, false otherwise.
+  virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+                                    const Function &F,
+                                    ArrayRef<unsigned> VRegs) const {
+    return false;
+  }
+
+  /// This hook must be implemented to lower the given call instruction,
+  /// including argument and return value marshalling.
+  ///
+  /// \p CallConv is the calling convention to be used for the call.
+  ///
+  /// \p Callee is the destination of the call. It should be either a register,
+  /// globaladdress, or externalsymbol.
+  ///
+  /// \p OrigRet is a descriptor for the value returned by the call: its type
+  /// and the generic virtual register that the returned value should be
+  /// lowered into.
+  ///
+  /// \p OrigArgs is a list of descriptors (one per argument that needs to be
+  /// passed) giving the type and virtual register of each; used by the target
+  /// to decide which register/stack slot should be allocated.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                         const MachineOperand &Callee, const ArgInfo &OrigRet,
+                         ArrayRef<ArgInfo> OrigArgs) const {
+    return false;
+  }
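+
+  // A hedged sketch of packaging operands for this hook; the vregs, types and
+  // callee operand below are hypothetical values produced by the caller:
+  //
+  //   ArgInfo OrigRet(RetVReg, RetTy);        // where the result should go
+  //   SmallVector<ArgInfo, 8> OrigArgs;
+  //   OrigArgs.emplace_back(ArgVReg, ArgTy);  // one entry per argument
+  //   CL.lowerCall(MIRBuilder, CallingConv::C, CalleeMO, OrigRet, OrigArgs);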
+
+  /// Lower the given call instruction, including argument and return value
+  /// marshalling.
+  ///
+  /// \p CS is the call site (call or invoke instruction) to be lowered.
+  ///
+  /// \p ResReg is a register where the call's return value should be stored (or
+  /// 0 if there is no return value).
+  ///
+  /// \p ArgRegs is a list of virtual registers containing each argument that
+  /// needs to be passed.
+  ///
+  /// \p GetCalleeReg is a callback to materialize a register for the callee if
+  /// the target determines it cannot jump to the destination based purely on
+  /// \p CS. This might be because \p CS is indirect, or because of the limited
+  /// range of an immediate jump.
+  ///
+  /// \return true if the lowering succeeded, false otherwise.
+  bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+                 unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                 std::function<unsigned()> GetCalleeReg) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
new file mode 100644
index 0000000..36a33de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -0,0 +1,43 @@
+//===- llvm/CodeGen/GlobalISel/Combiner.h -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// This contains common code to drive combines. Combiner passes will need to
+/// set up a CombinerInfo and call combineMachineInstrs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+class MachineRegisterInfo;
+class CombinerInfo;
+class TargetPassConfig;
+class MachineFunction;
+
+class Combiner {
+public:
+  Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);
+
+  bool combineMachineInstrs(MachineFunction &MF);
+
+protected:
+  CombinerInfo &CInfo;
+
+  MachineRegisterInfo *MRI = nullptr;
+  const TargetPassConfig *TPC;
+  MachineIRBuilder Builder;
+};
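+
+// A minimal usage sketch (not part of this header): a combiner pass would
+// construct a target-specific CombinerInfo and drive the Combiner from its
+// runOnMachineFunction. MyCombinerInfo and TPC are assumed to exist:
+//
+//   MyCombinerInfo CInfo;
+//   Combiner C(CInfo, TPC);
+//   bool Changed = C.combineMachineInstrs(MF);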
+
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
new file mode 100644
index 0000000..5d5b839
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -0,0 +1,44 @@
+//===- llvm/CodeGen/GlobalISel/CombinerHelper.h -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===--------------------------------------------------------------------===//
+//
+/// This contains common combine transformations that may be used in a combine
+/// pass, or by the target elsewhere.
+/// Targets can pick individual opcode transformations from the helper or use
+/// tryCombine which invokes all transformations. All of the transformations
+/// return true if the MachineInstr changed and false otherwise.
+//
+//===--------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
+
+namespace llvm {
+
+class MachineIRBuilder;
+class MachineRegisterInfo;
+class MachineInstr;
+
+class CombinerHelper {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+
+public:
+  CombinerHelper(MachineIRBuilder &B);
+
+  /// If \p MI is a COPY, try to combine it.
+  /// Returns true if MI changed.
+  bool tryCombineCopy(MachineInstr &MI);
+
+  /// Try to transform \p MI by using all of the above
+  /// combine functions. Returns true if changed.
+  bool tryCombine(MachineInstr &MI);
+};
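+
+// Usage sketch (illustrative): given a MachineIRBuilder B positioned in the
+// function being combined, a driver can try the helper on each instruction:
+//
+//   CombinerHelper Helper(B);
+//   bool Changed = Helper.tryCombine(MI);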
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
new file mode 100644
index 0000000..1d24854
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -0,0 +1,48 @@
+//===- llvm/CodeGen/GlobalISel/CombinerInfo.h -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for targets to specify how and when operations are combined.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+#define LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
+
+#include <cassert>
+
+namespace llvm {
+
+class LegalizerInfo;
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+// Contains information relevant to enabling/disabling various combines for a
+// pass.
+class CombinerInfo {
+public:
+  CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
+               LegalizerInfo *LInfo)
+      : IllegalOpsAllowed(AllowIllegalOps),
+        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo) {
+    assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
+           "Expecting a LegalizerInfo when illegal ops are not allowed");
+  }
+  virtual ~CombinerInfo() = default;
+  /// If \p IllegalOpsAllowed is false, the CombinerHelper will make use of
+  /// the LegalizerInfo to check for legality before each transformation.
+  bool IllegalOpsAllowed; // TODO: Make use of this.
+
+  /// If \p LegalizeIllegalOps is true, the Combiner will also legalize the
+  /// illegal ops that are created.
+  bool LegalizeIllegalOps; // TODO: Make use of this.
+  const LegalizerInfo *LInfo;
+  virtual bool combine(MachineInstr &MI, MachineIRBuilder &B) const = 0;
+};
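+
+// A hedged sketch of a concrete CombinerInfo; the class name and the use of
+// CombinerHelper (declared in CombinerHelper.h) are illustrative, not
+// mandated by this interface:
+//
+//   class MyCombinerInfo : public CombinerInfo {
+//   public:
+//     MyCombinerInfo()
+//         : CombinerInfo(/*AllowIllegalOps*/ true,
+//                        /*ShouldLegalizeIllegal*/ false, nullptr) {}
+//     bool combine(MachineInstr &MI, MachineIRBuilder &B) const override {
+//       CombinerHelper Helper(B);
+//       return Helper.tryCombine(MI);
+//     }
+//   };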
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_INFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
new file mode 100644
index 0000000..167905d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -0,0 +1,69 @@
+//===- GISelWorkList.h - Worklist for GISel passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_GISEL_WORKLIST_H
+#define LLVM_GISEL_WORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+// Worklist which mostly works similarly to InstCombineWorkList, but on
+// MachineInstrs. The main difference with something like a SetVector is that
+// erasing an element doesn't move all elements over one place - instead it
+// just nulls out the element of the vector.
+// FIXME: Does it make sense to factor out common code with the
+// InstCombineWorkList?
+template<unsigned N>
+class GISelWorkList {
+  SmallVector<MachineInstr*, N> Worklist;
+  DenseMap<MachineInstr*, unsigned> WorklistMap;
+
+public:
+  GISelWorkList() = default;
+
+  bool empty() const { return WorklistMap.empty(); }
+
+  unsigned size() const { return WorklistMap.size(); }
+
+  /// insert - Add the specified instruction to the worklist if it isn't
+  /// already in it.
+  void insert(MachineInstr *I) {
+    if (WorklistMap.try_emplace(I, Worklist.size()).second) {
+      Worklist.push_back(I);
+    }
+  }
+
+  /// remove - Remove \p I from the worklist if it exists.
+  void remove(MachineInstr *I) {
+    auto It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.
+    Worklist[It->second] = nullptr;
+
+    WorklistMap.erase(It);
+  }
+
+  MachineInstr *pop_back_val() {
+    MachineInstr *I;
+    // Skip over any slots that were nulled out by remove().
+    do {
+      I = Worklist.pop_back_val();
+    } while (!I);
+    assert(I && "Pop back on empty worklist");
+    WorklistMap.erase(I);
+    return I;
+  }
+};
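+
+// Usage sketch (illustrative): a typical drain loop. Instructions erased
+// during processing must be remove()d from the worklist first:
+//
+//   GISelWorkList<512> WorkList;
+//   for (MachineBasicBlock &MBB : MF)
+//     for (MachineInstr &MI : MBB)
+//       WorkList.insert(&MI);
+//   while (!WorkList.empty()) {
+//     MachineInstr *MI = WorkList.pop_back_val();
+//     // ... process MI ...
+//   }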
+
+} // end namespace llvm.
+
+#endif // LLVM_GISEL_WORKLIST_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
new file mode 100644
index 0000000..7061c01
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -0,0 +1,444 @@
+//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the IRTranslator pass.
+/// This pass is responsible for translating LLVM IR into MachineInstr.
+/// It uses target hooks to lower the ABI, but aside from that, the code
+/// generated by the pass is generic. This is the default translator used for
+/// GlobalISel.
+///
+/// \todo Replace the comments with actual doxygen comments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Types.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/IR/Intrinsics.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class CallLowering;
+class Constant;
+class DataLayout;
+class Instruction;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineRegisterInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetPassConfig;
+class User;
+class Value;
+
+// Technically the pass should run on a hypothetical MachineModule,
+// since it should translate Globals into some sort of MachineGlobal.
+// The MachineGlobal should ultimately just be a transfer of ownership of
+// the interesting bits that are relevant to represent a global value.
+// That being said, we could investigate what it would cost to just duplicate
+// the information from the LLVM IR.
+// The idea is that ultimately we would be able to free up the memory used
+// by the LLVM IR as soon as the translation is over.
+class IRTranslator : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// Interface used to lower everything related to calls.
+  const CallLowering *CLI;
+
+  /// Mapping of the values of the current LLVM IR function
+  /// to the related virtual registers.
+  ValueToVReg ValToVReg;
+
+  // N.b. it's not completely obvious that this will be sufficient for every
+  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
+  // our lives).
+  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
+
+  // One BasicBlock can be translated to multiple MachineBasicBlocks.  For such
+  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
+  // a mapping between the edges arriving at the BasicBlock to the corresponding
+  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
+  // single MachineBasicBlock may also end up in this Map.
+  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
+  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
+
+  // List of stubbed PHI instructions, for values and basic blocks to be filled
+  // in once all MachineBasicBlocks have been created.
+  SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+
+  /// Record of what frame index has been allocated to specified allocas for
+  /// this function.
+  DenseMap<const AllocaInst *, int> FrameIndices;
+
+  /// \name Methods for translating from LLVM IR to MachineInstr.
+  /// \see ::translate for general information on the translate methods.
+  /// @{
+
+  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
+  /// Insert the newly translated instruction(s) right where the CurBuilder
+  /// is set.
+  ///
+  /// The general algorithm is:
+  /// 1. Look for a virtual register for each operand, or
+  ///    create one.
+  /// 2. Update ValToVReg accordingly.
+  /// 2.alt. For constant arguments, if they are compile time constants,
+  ///   produce an immediate in the right operand and do not touch
+  ///   ValToVReg. Actually we will go with a virtual register for each
+  ///   constant because it may be expensive to actually materialize the
+  ///   constant. Moreover, if the constant spans several instructions,
+  ///   CSE may not catch them.
+  ///   => Update ValToVReg and remember that we saw a constant in Constants.
+  ///   We will materialize all the constants in finalize.
+  /// Note: we would need to do something so that we can recognize such
+  ///       operands as constants.
+  /// 3. Create the generic instruction.
+  ///
+  /// \return true if the translation succeeded.
+  bool translate(const Instruction &Inst);
+
+  /// Materialize \p C into virtual-register \p Reg. The generic instructions
+  /// performing this materialization will be inserted into the entry block of
+  /// the function.
+  ///
+  /// \return true if the materialization succeeded.
+  bool translate(const Constant &C, unsigned Reg);
+
+  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
+  /// emitted.
+  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM load instruction into generic IR.
+  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM store instruction into generic IR.
+  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an LLVM memory intrinsic (memcpy, memset, ...).
+  bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
+                        unsigned Intrinsic);
+
+  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+
+  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
+                                  MachineIRBuilder &MIRBuilder);
+
+  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
+                               MachineIRBuilder &MIRBuilder);
+
+  bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+
+  /// Translate call instruction.
+  /// \pre \p U is a call instruction.
+  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
+  /// given generic Opcode.
+  bool translateCast(unsigned Opcode, const User &U,
+                     MachineIRBuilder &MIRBuilder);
+
+  /// Translate a phi instruction.
+  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate a comparison (icmp or fcmp) instruction or constant.
+  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate an integer compare instruction (or constant).
+  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Translate a floating-point compare instruction (or constant).
+  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCompare(U, MIRBuilder);
+  }
+
+  /// Add remaining operands onto phis we've translated. Executed after all
+  /// MachineBasicBlocks for the function have been created.
+  void finishPendingPhis();
+
+  /// Translate \p U into a binary operation \p Opcode.
+  /// \pre \p U is a binary operation.
+  bool translateBinaryOp(unsigned Opcode, const User &U,
+                         MachineIRBuilder &MIRBuilder);
+
+  /// Translate branch (br) instruction.
+  /// \pre \p U is a branch instruction.
+  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
+
+  /// Translate return (ret) instruction.
+  /// The target needs to implement CallLowering::lowerReturn for
+  /// this to succeed.
+  /// \pre \p U is a return instruction.
+  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
+  }
+  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
+  }
+  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
+  }
+  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
+  }
+  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
+  }
+  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
+  }
+
+  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
+  }
+  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
+  }
+  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
+  }
+  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
+  }
+  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
+  }
+  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
+  }
+  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
+  }
+  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
+  }
+  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
+  }
+  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
+  }
+  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
+  }
+  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
+  }
+  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
+  }
+  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
+    return true;
+  }
+  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
+  }
+
+  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
+  }
+
+  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
+  }
+  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
+  }
+  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
+  }
+
+  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
+  }
+  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
+  }
+  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
+  }
+  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
+  }
+
+  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
+
+  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
+
+  // Stubs to keep the compiler happy while we implement the rest of the
+  // translation.
+  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
+    return false;
+  }
+
+  /// @}
+
+  // Builder for machine instructions, a la IRBuilder.
+  // I.e., compared to the regular MIBuilder, this one also inserts the
+  // instruction in the current block, it can create blocks, etc., basically a
+  // kind of IRBuilder, but for Machine IR.
+  MachineIRBuilder CurBuilder;
+
+  // Builder set to the entry block (just after ABI lowering instructions). Used
+  // as a convenient location for Constants.
+  MachineIRBuilder EntryBuilder;
+
+  // The MachineFunction currently being translated.
+  MachineFunction *MF;
+
+  /// MachineRegisterInfo used to create virtual registers.
+  MachineRegisterInfo *MRI = nullptr;
+
+  const DataLayout *DL;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<OptimizationRemarkEmitter> ORE;
+
+  // * Insert all the code needed to materialize the constants
+  // at the proper place. E.g., Entry block or dominator block
+  // of each constant depending on how fancy we want to be.
+  // * Clear the different maps.
+  void finalizeFunction();
+
+  /// Get the VReg that represents \p Val.
+  /// If no such VReg exists, it is created.
+  unsigned getOrCreateVReg(const Value &Val);
+
+  /// Get the frame index that represents \p AI.
+  /// If no such frame index exists, it is created.
+  int getOrCreateFrameIndex(const AllocaInst &AI);
+
+  /// Get the alignment of the given memory operation instruction. This will
+  /// either be the explicitly specified value or the ABI-required alignment for
+  /// the type being accessed (according to the Module's DataLayout).
+  unsigned getMemOpAlignment(const Instruction &I);
+
+  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
+  /// returned will be the head of the translated block (suitable for branch
+  /// destinations).
+  MachineBasicBlock &getMBB(const BasicBlock &BB);
+
+  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
+  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
+  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
+  /// represented simply by the IR-level CFG.
+  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
+
+  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
+  /// this is just the single MachineBasicBlock corresponding to the predecessor
+  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
+  /// preceding the original one (e.g. for switch instructions).
+  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
+    auto RemappedEdge = MachinePreds.find(Edge);
+    if (RemappedEdge != MachinePreds.end())
+      return RemappedEdge->second;
+    return SmallVector<MachineBasicBlock *, 1>(1, &getMBB(*Edge.first));
+  }
+
+public:
+  // Ctor, nothing fancy.
+  IRTranslator();
+
+  StringRef getPassName() const override { return "IRTranslator"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  // Algo:
+  //   CallLowering = MF.subtarget.getCallLowering()
+  //   F = MF.getParent()
+  //   MIRBuilder.reset(MF)
+  //   getMBB(F.getEntryBB())
+  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
+  //   for each bb in F
+  //     getMBB(bb)
+  //     for each inst in bb
+  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
+  //         report_fatal_error("Don't know how to translate input");
+  //   finalize()
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
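+
+// Usage sketch (illustrative): in a GlobalISel pipeline the translator runs
+// before Legalizer, RegBankSelect and InstructionSelect; a target's pass
+// config (MyPassConfig is a hypothetical name) typically schedules it as:
+//
+//   bool MyPassConfig::addIRTranslator() {
+//     addPass(new IRTranslator());
+//     return false; // false means "no error".
+//   }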
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
new file mode 100644
index 0000000..01521c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelect.h
@@ -0,0 +1,53 @@
+//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for selecting (possibly generic) machine instructions to
+/// target-specific instructions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
+
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+/// This pass is responsible for selecting generic machine instructions to
+/// target-specific instructions.  It relies on the InstructionSelector provided
+/// by the target.
+/// Selection is done by examining blocks in post-order, and instructions in
+/// reverse order.
+///
+/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
+class InstructionSelect : public MachineFunctionPass {
+public:
+  static char ID;
+  StringRef getPassName() const override { return "InstructionSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Selected);
+  }
+
+  InstructionSelect();
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
new file mode 100644
index 0000000..eacd135
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -0,0 +1,381 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelector.h ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include <bitset>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <vector>
+
+namespace llvm {
+
+class APInt;
+class APFloat;
+class LLT;
+class MachineInstr;
+class MachineInstrBuilder;
+class MachineFunction;
+class MachineOperand;
+class MachineRegisterInfo;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Container class for CodeGen predicate results.
+/// This is convenient because std::bitset does not have a constructor
+/// with an initializer list of set bits.
+///
+/// Each InstructionSelector subclass should define a PredicateBitset class
+/// with:
+///   const unsigned MAX_SUBTARGET_PREDICATES = 192;
+///   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
+/// and update the constant to suit the target. TableGen provides a suitable
+/// definition for the predicates in use in <Target>GenGlobalISel.inc when
+/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
+template <std::size_t MaxPredicates>
+class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+  PredicateBitsetImpl() = default;
+
+  PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
+      : std::bitset<MaxPredicates>(B) {}
+
+  PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
+    for (auto I : Init)
+      std::bitset<MaxPredicates>::set(I);
+  }
+};
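+
+// For instance (feature names and bit indices are hypothetical), a selector
+// could build its available-features set via the initializer-list ctor:
+//   PredicateBitset AvailableFeatures = {Feature_HasA_Bit, Feature_HasB_Bit};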
+
+enum {
+  /// Begin a try-block to attempt a match and jump to OnFail if it is
+  /// unsuccessful.
+  /// - OnFail - The MatchTable entry at which to resume if the match fails.
+  ///
+  /// FIXME: This ought to take an argument indicating the number of try-blocks
+  ///        to exit on failure. It's usually one but the last match attempt of
+  ///        a block will need more. The (implemented) alternative is to tack a
+  ///        GIM_Reject on the end of each try-block which is simpler but
+  ///        requires an extra opcode and iteration in the interpreter on each
+  ///        failed match.
+  GIM_Try,
+
+  /// Record the specified instruction
+  /// - NewInsnID - Instruction ID to define
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_RecordInsn,
+
+  /// Check the feature bits
+  /// - Expected features
+  GIM_CheckFeatures,
+
+  /// Check the opcode on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - Expected opcode
+  GIM_CheckOpcode,
+  /// Check the instruction has the right number of operands
+  /// - InsnID - Instruction ID
+  /// - Expected number of operands
+  GIM_CheckNumOperands,
+  /// Check an immediate predicate on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckI64ImmPredicate,
+  /// Check an immediate predicate on the specified instruction via an APInt.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPIntImmPredicate,
+  /// Check a floating point immediate predicate on the specified instruction.
+  /// - InsnID - Instruction ID
+  /// - The predicate to test
+  GIM_CheckAPFloatImmPredicate,
+  /// Check a memory operation has the specified atomic ordering.
+  /// - InsnID - Instruction ID
+  /// - Ordering - The AtomicOrdering value
+  GIM_CheckAtomicOrdering,
+  GIM_CheckAtomicOrderingOrStrongerThan,
+  GIM_CheckAtomicOrderingWeakerThan,
+
+  /// Check the type for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected type
+  GIM_CheckType,
+  /// Check the type of a pointer to any address space.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - SizeInBits - The size of the pointer value in bits.
+  GIM_CheckPointerToAny,
+  /// Check the register bank for the specified operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected register bank (specified as a register class)
+  GIM_CheckRegBankForClass,
+  /// Check the operand matches a complex predicate
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - RendererID - The renderer to hold the result
+  /// - Complex predicate ID
+  GIM_CheckComplexPattern,
+  /// Check the operand is a specific integer
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckConstantInt,
+  /// Check the operand is a specific literal integer (i.e. MO.isImm() or
+  /// MO.isCImm() is true).
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected integer
+  GIM_CheckLiteralInt,
+  /// Check the operand is a specific intrinsic ID
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected Intrinsic ID
+  GIM_CheckIntrinsicID,
+  /// Check the specified operand is an MBB
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_CheckIsMBB,
+
+  /// Check if the specified operand is safe to fold into the current
+  /// instruction.
+  /// - InsnID - Instruction ID
+  GIM_CheckIsSafeToFold,
+
+  /// Check the specified operands are identical.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - OtherInsnID - Other instruction ID
+  /// - OtherOpIdx - Other operand index
+  GIM_CheckIsSameOperand,
+
+  /// Fail the current try-block, or completely fail to match if there is no
+  /// current try-block.
+  GIM_Reject,
+
+  //=== Renderers ===
+
+  /// Mutate an instruction
+  /// - NewInsnID - Instruction ID to define
+  /// - OldInsnID - Instruction ID to mutate
+  /// - NewOpcode - The new opcode to use
+  GIR_MutateOpcode,
+  /// Build a new instruction
+  /// - InsnID - Instruction ID to define
+  /// - Opcode - The new opcode to use
+  GIR_BuildMI,
+
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  GIR_Copy,
+  /// Copy an operand to the specified instruction or add a zero register if the
+  /// operand is a zero immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - ZeroReg - The zero register to use
+  GIR_CopyOrAddZeroReg,
+  /// Copy an operand to the specified instruction
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// - OpIdx - The operand to copy
+  /// - SubRegIdx - The subregister to copy
+  GIR_CopySubReg,
+  /// Add an implicit register def to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitDef,
+  /// Add an implicit register use to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddImplicitUse,
+  /// Add a register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RegNum - The register to add
+  GIR_AddRegister,
+  /// Add a temporary register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - TempRegID - The temporary register ID to add
+  GIR_AddTempRegister,
+  /// Add an immediate to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - Imm - The immediate to add
+  GIR_AddImm,
+  /// Render complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  GIR_ComplexRenderer,
+  /// Render sub-operands of complex operands to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - RendererID - The renderer to call
+  /// - RenderOpID - The suboperand to render.
+  GIR_ComplexSubOperandRenderer,
+  /// Render operands to the specified instruction using a custom function
+  /// - InsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to get the matched operand from
+  /// - RendererFnID - Custom renderer function to call
+  GIR_CustomRenderer,
+
+  /// Render a G_CONSTANT operator as a sign-extended immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// The operand index is implicitly 1.
+  GIR_CopyConstantAsSImm,
+
+  /// Constrain an instruction operand to a register class.
+  /// - InsnID - Instruction ID to modify
+  /// - OpIdx - Operand index
+  /// - RCEnum - Register class enumeration value
+  GIR_ConstrainOperandRC,
+  /// Constrain an instruction's operands according to the instruction
+  /// description.
+  /// - InsnID - Instruction ID to modify
+  GIR_ConstrainSelectedInstOperands,
+  /// Merge all memory operands into instruction.
+  /// - InsnID - Instruction ID to modify
+  /// - MergeInsnID... - One or more instruction IDs to merge into the result.
+  /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
+  ///                                    merge.
+  GIR_MergeMemOperands,
+  /// Erase from parent.
+  /// - InsnID - Instruction ID to erase
+  GIR_EraseFromParent,
+  /// Create a new temporary register that's not constrained.
+  /// - TempRegID - The temporary register ID to initialize.
+  /// - Expected type
+  GIR_MakeTempReg,
+
+  /// A successful emission
+  GIR_Done,
+
+  /// Increment the rule coverage counter.
+  /// - RuleID - The ID of the rule that was covered.
+  GIR_Coverage,
+};
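+
+// A hedged sketch of how these opcodes are laid out in a matcher table; the
+// MYTGT_* opcodes and exact indices are illustrative only (real tables are
+// emitted by TableGen into <Target>GenGlobalISel.inc):
+//
+//   static const int64_t MatchTable[] = {
+//     GIM_Try, /*OnFail*/ 10,                        // [0][1]
+//       GIM_CheckOpcode, /*InsnID*/ 0, MYTGT_FOO,    // [2][3][4]
+//       GIR_MutateOpcode, /*NewInsnID*/ 0,
+//           /*OldInsnID*/ 0, MYTGT_BAR,              // [5][6][7][8]
+//       GIR_Done,                                    // [9]
+//     GIM_Reject,                                    // [10] OnFail resumes here
+//   };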
+
+enum {
+  /// Indicates the end of the variable-length MergeInsnID list in a
+  /// GIR_MergeMemOperands opcode.
+  GIU_MergeMemOperands_EndOfList = -1,
+};
+
+/// Provides the logic to select generic machine instructions.
+class InstructionSelector {
+public:
+  virtual ~InstructionSelector() = default;
+
+  /// Select the (possibly generic) instruction \p I to only use target-specific
+  /// opcodes. It is OK to insert multiple instructions, but they cannot be
+  /// generic pre-isel instructions.
+  ///
+  /// \returns whether selection succeeded.
+  /// \pre  I.getParent() && I.getParent()->getParent()
+  /// \post
+  ///   if returns true:
+  ///     for I in all mutated/inserted instructions:
+  ///       !isPreISelGenericOpcode(I.getOpcode())
+  virtual bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const = 0;
+
+protected:
+  using ComplexRendererFns =
+      Optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
+  using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+  using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+  struct MatcherState {
+    std::vector<ComplexRendererFns::value_type> Renderers;
+    RecordedMIVector MIs;
+    DenseMap<unsigned, unsigned> TempRegisters;
+
+    MatcherState(unsigned MaxRenderers);
+  };
+
+public:
+  template <class PredicateBitset, class ComplexMatcherMemFn,
+            class CustomRendererFn>
+  struct ISelInfoTy {
+    const LLT *TypeObjects;
+    const PredicateBitset *FeatureBitsets;
+    const ComplexMatcherMemFn *ComplexPredicates;
+    const CustomRendererFn *CustomRenderers;
+  };
+
+protected:
+  InstructionSelector();
+
+  /// Execute a given matcher table and return true if the match was successful
+  /// and false otherwise.
+  template <class TgtInstructionSelector, class PredicateBitset,
+            class ComplexMatcherMemFn, class CustomRendererFn>
+  bool executeMatchTable(
+      TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+      const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+          &ISelInfo,
+      const int64_t *MatchTable, const TargetInstrInfo &TII,
+      MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+      const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+      CodeGenCoverage &CoverageInfo) const;
+
+  virtual bool testImmPredicate_I64(unsigned, int64_t) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+  virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
+    llvm_unreachable("Subclasses must override this to use tablegen");
+  }
+
+  /// Constrain a register operand of an instruction \p I to a specified
+  /// register class. This could involve inserting COPYs before (for uses) or
+  /// after (for defs) and may replace the operand of \p I.
+  /// \returns whether operand regclass constraining succeeded.
+  bool constrainOperandRegToRegClass(MachineInstr &I, unsigned OpIdx,
+                                     const TargetRegisterClass &RC,
+                                     const TargetInstrInfo &TII,
+                                     const TargetRegisterInfo &TRI,
+                                     const RegisterBankInfo &RBI) const;
+
+  bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
+                         const MachineRegisterInfo &MRI) const;
+
+  /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+  /// right-hand side. GlobalISel's separation of pointer and integer types
+  /// means that we don't need to worry about G_OR with equivalent semantics.
+  bool isBaseWithConstantOffset(const MachineOperand &Root,
+                                const MachineRegisterInfo &MRI) const;
+
+  /// Return true if MI can obviously be folded into IntoMI.
+  /// MI and IntoMI do not need to be in the same basic block, but MI must
+  /// precede IntoMI.
+  bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
+};
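+
+// A hedged sketch of a target selector built on this API; the class name and
+// selectManually are illustrative, and selectImpl is the matcher that
+// TableGen emits into <Target>GenGlobalISel.inc:
+//
+//   class MyInstructionSelector : public InstructionSelector {
+//   public:
+//     bool select(MachineInstr &I,
+//                 CodeGenCoverage &CoverageInfo) const override {
+//       if (selectImpl(I, CoverageInfo)) // generated MatchTable first
+//         return true;
+//       return selectManually(I);        // hand-written fallback
+//     }
+//   };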
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 0000000..f7593ba
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,769 @@
+//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file implements InstructionSelector::executeMatchTable, the
+/// MatchTable interpreter used by the instruction selectors generated by
+/// TableGen. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+/// GlobalISel PatFrag Predicates
+enum {
+  GIPFP_I64_Invalid = 0,
+  GIPFP_APInt_Invalid = 0,
+  GIPFP_APFloat_Invalid = 0,
+};
+
+template <class TgtInstructionSelector, class PredicateBitset,
+          class ComplexMatcherMemFn, class CustomRendererFn>
+bool InstructionSelector::executeMatchTable(
+    TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+    const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
+        &ISelInfo,
+    const int64_t *MatchTable, const TargetInstrInfo &TII,
+    MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+    const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
+    CodeGenCoverage &CoverageInfo) const {
+  uint64_t CurrentIdx = 0;
+  SmallVector<uint64_t, 8> OnFailResumeAt;
+
+  enum RejectAction { RejectAndGiveUp, RejectAndResume };
+  auto handleReject = [&]() -> RejectAction {
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Rejected\n");
+    if (OnFailResumeAt.empty())
+      return RejectAndGiveUp;
+    CurrentIdx = OnFailResumeAt.back();
+    OnFailResumeAt.pop_back();
+    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                    dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
+                           << OnFailResumeAt.size() << " try-blocks remain)\n");
+    return RejectAndResume;
+  };
+
+  while (true) {
+    assert(CurrentIdx != ~0u && "Invalid MatchTable index");
+    switch (MatchTable[CurrentIdx++]) {
+    case GIM_Try: {
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": Begin try-block\n");
+      OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
+      break;
+    }
+
+    case GIM_RecordInsn: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+
+      // As an optimisation we require that MIs[0] is always the root. Refuse
+      // any attempt to modify it.
+      assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg()) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Not a register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+      if (TRI.isPhysicalRegister(MO.getReg())) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Is a physical register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
+      if ((size_t)NewInsnID < State.MIs.size())
+        State.MIs[NewInsnID] = NewMI;
+      else {
+        assert((size_t)NewInsnID == State.MIs.size() &&
+               "Expected to store MIs in order");
+        State.MIs.push_back(NewMI);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": MIs[" << NewInsnID
+                             << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
+                             << ")\n");
+      break;
+    }
+
+    case GIM_CheckFeatures: {
+      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckFeatures(ExpectedBitsetID="
+                             << ExpectedBitsetID << ")\n");
+      if ((AvailableFeatures & ISelInfo.FeatureBitsets[ExpectedBitsetID]) !=
+          ISelInfo.FeatureBitsets[ExpectedBitsetID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckOpcode: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      unsigned Opcode = State.MIs[InsnID]->getOpcode();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+                             << "], ExpectedOpcode=" << Expected
+                             << ") // Got=" << Opcode << "\n");
+      if (Opcode != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckNumOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Expected = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
+                             << InsnID << "], Expected=" << Expected << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (State.MIs[InsnID]->getNumOperands() != Expected) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckI64ImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckI64ImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
+      int64_t Value = 0;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getSExtValue();
+      else if (State.MIs[InsnID]->getOperand(1).isImm())
+        Value = State.MIs[InsnID]->getOperand(1).getImm();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_I64(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPIntImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
+      APInt Value;
+      if (State.MIs[InsnID]->getOperand(1).isCImm())
+        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+
+      if (!testImmPredicate_APInt(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAPFloatImmPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
+             "Expected G_FCONSTANT");
+      assert(State.MIs[InsnID]->getOperand(1).isFPImm() &&
+             "Expected FPImm operand");
+      assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
+      APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();
+
+      if (!testImmPredicate_APFloat(Predicate, Value))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+    case GIM_CheckAtomicOrdering: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (MMO->getOrdering() != Ordering)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingOrStrongerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isAtLeastOrStrongerThan(MMO->getOrdering(), Ordering))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckAtomicOrderingWeakerThan: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
+                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (!State.MIs[InsnID]->hasOneMemOperand())
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      for (const auto &MMO : State.MIs[InsnID]->memoperands())
+        if (!isStrongerThan(Ordering, MMO->getOrdering()))
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      break;
+    }
+    case GIM_CheckType: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), TypeID=" << TypeID << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckPointerToAny: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SizeInBits = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), SizeInBits=" << SizeInBits << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      // iPTR must be looked up in the target.
+      if (SizeInBits == 0) {
+        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
+        SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+      }
+
+      assert(SizeInBits != 0 && "Pointer size must be known");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        const LLT &Ty = MRI.getType(MO.getReg());
+        if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+    case GIM_CheckRegBankForClass: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), RCEnum=" << RCEnum << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg() ||
+          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+              RBI.getRegBank(MO.getReg(), MRI, TRI)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckComplexPattern: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
+                             << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx
+                             << "), ComplexPredicateID=" << ComplexPredicateID
+                             << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      // FIXME: Use std::invoke() when it's available.
+      ComplexRendererFns Renderer =
+          (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
+              State.MIs[InsnID]->getOperand(OpIdx));
+      if (Renderer.hasValue())
+        State.Renderers[RendererID] = Renderer.getValue();
+      else
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckConstantInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (MO.isReg()) {
+        // isOperandImmEqual() will sign-extend to 64-bits, so should we.
+        LLT Ty = MRI.getType(MO.getReg());
+        Value = SignExtend64(Value, Ty.getSizeInBits());
+
+        if (!isOperandImmEqual(MO, Value, MRI)) {
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+        }
+      } else if (handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
+
+    case GIM_CheckLiteralInt: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIntrinsicID: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
+
+    case GIM_CheckIsMBB: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx << "))\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+
+    case GIM_CheckIsSafeToFold: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
+                             << InsnID << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_CheckIsSameOperand: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t OtherInsnID = MatchTable[CurrentIdx++];
+      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
+                             << InsnID << "][" << OpIdx << "], MIs["
+                             << OtherInsnID << "][" << OtherOpIdx << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
+              State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
+    case GIM_Reject:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_Reject");
+      if (handleReject() == RejectAndGiveUp)
+        return false;
+      break;
+
+    case GIR_MutateOpcode: {
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t NewOpcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
+                                              State.MIs[OldInsnID]);
+      OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << NewOpcode << ")\n");
+      break;
+    }
+
+    case GIR_BuildMI: {
+      uint64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t Opcode = MatchTable[CurrentIdx++];
+      if (NewInsnID >= OutMIs.size())
+        OutMIs.resize(NewInsnID + 1);
+
+      OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+                                  State.MIs[0]->getDebugLoc(), TII.get(Opcode));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
+                             << NewInsnID << "], " << Opcode << ")\n");
+      break;
+    }
+
+    case GIR_Copy: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
+                          << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
+      break;
+    }
+
+    case GIR_CopyOrAddZeroReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t ZeroReg = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
+      if (isOperandImmEqual(MO, 0, MRI))
+        OutMIs[NewInsnID].addReg(ZeroReg);
+      else
+        OutMIs[NewInsnID].add(MO);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << ZeroReg << ")\n");
+      break;
+    }
+
+    case GIR_CopySubReg: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t SubRegIdx = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+                               0, SubRegIdx);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "], "
+                             << OpIdx << ", " << SubRegIdx << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitDef: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddImplicitUse: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RegNum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(RegNum);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+                             << InsnID << "], " << RegNum << ")\n");
+      break;
+    }
+
+    case GIR_AddTempRegister: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
+                             << InsnID << "], TempRegisters[" << TempRegID
+                             << "], " << TempRegFlags << ")\n");
+      break;
+    }
+
+    case GIR_AddImm: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Imm = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      OutMIs[InsnID].addImm(Imm);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
+                             << "], " << Imm << ")\n");
+      break;
+    }
+
+    case GIR_ComplexRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      for (const auto &RenderOpFn : State.Renderers[RendererID])
+        RenderOpFn(OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ")\n");
+      break;
+    }
+    case GIR_ComplexSubOperandRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t RendererID = MatchTable[CurrentIdx++];
+      int64_t RenderOpID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ComplexSubOperandRenderer(OutMIs["
+                             << InsnID << "], " << RendererID << ", "
+                             << RenderOpID << ")\n");
+      break;
+    }
+
+    case GIR_CopyConstantAsSImm: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
+      if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
+        OutMIs[NewInsnID].addImm(
+            State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
+      } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
+        OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
+      else
+        llvm_unreachable("Expected Imm or CImm operand");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
+                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+      break;
+    }
+
+    case GIR_CustomRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t RendererFnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
+                             << InsnID << "], MIs[" << OldInsnID << "], "
+                             << RendererFnID << ")\n");
+      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
+                                                     *State.MIs[OldInsnID]);
+      break;
+    }
+    case GIR_ConstrainOperandRC: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RCEnum = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
+                                    *TRI.getRegClass(RCEnum), TII, TRI, RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
+                             << InsnID << "], " << OpIdx << ", " << RCEnum
+                             << ")\n");
+      break;
+    }
+
+    case GIR_ConstrainSelectedInstOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+      constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+                                       RBI);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIR_ConstrainSelectedInstOperands(OutMIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MergeMemOperands: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
+                             << InsnID << "]");
+      int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
+      while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
+             GIU_MergeMemOperands_EndOfList) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << ", MIs[" << MergeInsnID << "]");
+        for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
+          OutMIs[InsnID].addMemOperand(MMO);
+      }
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
+      break;
+    }
+
+    case GIR_EraseFromParent: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      assert(State.MIs[InsnID] &&
+             "Attempted to erase an undefined instruction");
+      State.MIs[InsnID]->eraseFromParent();
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
+                             << InsnID << "])\n");
+      break;
+    }
+
+    case GIR_MakeTempReg: {
+      int64_t TempRegID = MatchTable[CurrentIdx++];
+      int64_t TypeID = MatchTable[CurrentIdx++];
+
+      State.TempRegisters[TempRegID] =
+          MRI.createGenericVirtualRegister(ISelInfo.TypeObjects[TypeID]);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
+                             << "] = GIR_MakeTempReg(" << TypeID << ")\n");
+      break;
+    }
+
+    case GIR_Coverage: {
+      int64_t RuleID = MatchTable[CurrentIdx++];
+      CoverageInfo.setCovered(RuleID);
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIR_Coverage(" << RuleID << ")");
+      break;
+    }
+
+    case GIR_Done:
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_Done");
+      return true;
+
+    default:
+      llvm_unreachable("Unexpected command");
+    }
+  }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
new file mode 100644
index 0000000..a1f564b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -0,0 +1,287 @@
+//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains some helper functions which try to clean up artifacts
+// such as G_TRUNCs and G_[ZSA]EXTs that were created during legalization to
+// make the types match. This file also contains some combines of merges that
+// happen at the end of the legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+
+namespace llvm {
+class LegalizationArtifactCombiner {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+
+public:
+  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                               const LegalizerInfo &LI)
+      : Builder(B), MRI(MRI), LI(LI) {}
+
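+  /// Fold away an anyext fed by a trunc. For example (illustrative MIR):
+  ///   %1:_(s16) = G_TRUNC %0:_(s32)
+  ///   %2:_(s32) = G_ANYEXT %1:_(s16)
+  /// becomes a plain %2 = COPY %0, since the extended bits are undefined
+  /// anyway. Depending on the sizes involved, the replacement may instead be
+  /// a G_TRUNC or G_ANYEXT of %0.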
+  bool tryCombineAnyExt(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned SrcReg = DefMI->getOperand(1).getReg();
+      Builder.setInstr(MI);
+      // We get a copy/trunc/extend depending on the sizes
+      Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
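+  /// Fold away a zext fed by a trunc by masking instead. For example
+  /// (illustrative MIR):
+  ///   %1:_(s16) = G_TRUNC %0:_(s32)
+  ///   %2:_(s32) = G_ZEXT %1:_(s16)
+  /// becomes %2 = G_AND %0, 0xffff, keeping only the low 16 bits.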
+  bool tryCombineZExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_ZEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned ZExtSrc = MI.getOperand(1).getReg();
+      LLT ZExtSrcTy = MRI.getType(ZExtSrc);
+      APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
+      auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
+      unsigned TruncSrc = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+      Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
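+  /// Fold away a sext fed by a trunc by shifting instead. For example
+  /// (illustrative MIR):
+  ///   %1:_(s16) = G_TRUNC %0:_(s32)
+  ///   %2:_(s32) = G_SEXT %1:_(s16)
+  /// becomes %2 = G_ASHR (G_SHL %0, 16), 16, replicating bit 15 upwards.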
+  bool tryCombineSExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_SEXT)
+      return false;
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
+          isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned SExtSrc = MI.getOperand(1).getReg();
+      LLT SExtSrcTy = MRI.getType(SExtSrc);
+      unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
+      auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
+      unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes
+      auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
+      auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
+                                       SrcCopyExtOrTrunc, SizeDiffMIB);
+      Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return tryFoldImplicitDef(MI, DeadInsts);
+  }
+
+  /// Try to fold sb = EXTEND (G_IMPLICIT_DEF sa) -> sb = G_IMPLICIT_DEF
+  bool tryFoldImplicitDef(MachineInstr &MI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    unsigned Opcode = MI.getOpcode();
+    if (Opcode != TargetOpcode::G_ANYEXT && Opcode != TargetOpcode::G_ZEXT &&
+        Opcode != TargetOpcode::G_SEXT)
+      return false;
+
+    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
+                                           MI.getOperand(1).getReg(), MRI)) {
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+        return false;
+      DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+      Builder.setInstr(MI);
+      Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
+      markInstAndDefDead(MI, *DefMI, DeadInsts);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineMerges(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+
+    if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+      return false;
+
+    unsigned NumDefs = MI.getNumOperands() - 1;
+    unsigned SrcReg = MI.getOperand(NumDefs).getReg();
+    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+    if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+      return false;
+
+    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+    if (NumMergeRegs < NumDefs) {
+      if (NumDefs % NumMergeRegs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to UNMERGEs, for example
+      //   %1 = G_MERGE_VALUES %4, %5
+      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+      // to
+      //   %9, %10 = G_UNMERGE_VALUES %4
+      //   %11, %12 = G_UNMERGE_VALUES %5
+
+      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+        SmallVector<unsigned, 2> DstRegs;
+        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+             ++j, ++DefIdx)
+          DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+      }
+
+    } else if (NumMergeRegs > NumDefs) {
+      if (NumMergeRegs % NumDefs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to MERGEs
+      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
+      //   %7, %8 = G_UNMERGE_VALUES %6
+      // to
+      //   %7 = G_MERGE_VALUES %17, %18
+      //   %8 = G_MERGE_VALUES %19, %20
+
+      const unsigned NumRegs = NumMergeRegs / NumDefs;
+      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+        SmallVector<unsigned, 2> Regs;
+        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+             ++j, ++Idx)
+          Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+        Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+      }
+
+    } else {
+      // FIXME: is a COPY appropriate if the types mismatch? We know both
+      // registers are allocatable by now.
+      if (MRI.getType(MI.getOperand(0).getReg()) !=
+          MRI.getType(MergeI->getOperand(1).getReg()))
+        return false;
+
+      for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
+        MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
+                           MergeI->getOperand(Idx + 1).getReg());
+    }
+
+    markInstAndDefDead(MI, *MergeI, DeadInsts);
+    return true;
+  }
+
+  /// Try to combine away MI.
+  /// Returns true if it combined away the MI.
+  /// Adds instructions that are dead as a result of the combine
+  /// into DeadInsts, which can include MI.
+  bool tryCombineInstruction(MachineInstr &MI,
+                             SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    switch (MI.getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::G_ANYEXT:
+      return tryCombineAnyExt(MI, DeadInsts);
+    case TargetOpcode::G_ZEXT:
+      return tryCombineZExt(MI, DeadInsts);
+    case TargetOpcode::G_SEXT:
+      return tryCombineSExt(MI, DeadInsts);
+    case TargetOpcode::G_UNMERGE_VALUES:
+      return tryCombineMerges(MI, DeadInsts);
+    case TargetOpcode::G_TRUNC: {
+      bool Changed = false;
+      for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
+        Changed |= tryCombineInstruction(Use, DeadInsts);
+      return Changed;
+    }
+    }
+  }
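+
+  // Typical use (a sketch, not the actual Legalizer pass): try to combine an
+  // artifact, then delete whatever the combiner reported dead:
+  //   SmallVector<MachineInstr *, 4> DeadInsts;
+  //   if (ArtCombiner.tryCombineInstruction(MI, DeadInsts))
+  //     for (MachineInstr *DeadMI : DeadInsts)
+  //       DeadMI->eraseFromParent();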
+
+private:
+  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+  /// dead due to MI being killed, then mark DefMI as dead too.
+  /// Some of the combines (e.g. extend(trunc)) try to walk through redundant
+  /// copies between the extends and the truncs, and this attempts to collect
+  /// the in-between copies if they are dead.
+  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    DeadInsts.push_back(&MI);
+
+    // Collect all the copy instructions that are made dead, due to deleting
+    // this instruction. Collect all of them until the Trunc(DefMI).
+    // Eg,
+    // %1(s1) = G_TRUNC %0(s32)
+    // %2(s1) = COPY %1(s1)
+    // %3(s1) = COPY %2(s1)
+    // %4(s32) = G_ANYEXT %3(s1)
+    // In this case, we would have replaced %4 with a copy of %0,
+    // and as a result, %3, %2, %1 are dead.
+    MachineInstr *PrevMI = &MI;
+    while (PrevMI != &DefMI) {
+      // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't
+      // really try to fold copies in between and we can ignore them here.
+      // to fold copies in between and we can ignore them here.
+      if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
+        break;
+      unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
+      if (MRI.hasOneUse(PrevRegSrc)) {
+        if (TmpDef != &DefMI) {
+          assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
+                 "Expecting copy here");
+          DeadInsts.push_back(TmpDef);
+        }
+      } else
+        break;
+      PrevMI = TmpDef;
+    }
+    if ((PrevMI == &DefMI ||
+         DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
+        MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+      DeadInsts.push_back(&DefMI);
+  }
+
+  /// Returns true if the target's legalizer info has no rule covering the
+  /// instruction, or explicitly marks it as unsupported.
+  bool isInstUnsupported(const LegalityQuery &Query) const {
+    using namespace LegalizeActions;
+    auto Step = LI.getAction(Query);
+    return Step.Action == Unsupported || Step.Action == NotFound;
+  }
+};
+
+} // namespace llvm
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
new file mode 100644
index 0000000..8284ab6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -0,0 +1,65 @@
+//===-- llvm/CodeGen/GlobalISel/Legalizer.h --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is designed
+/// to be callable from other passes that find themselves with an illegal
+/// instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZEMACHINEIRPASS_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+
+class Legalizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  // Ctor, nothing fancy.
+  Legalizer();
+
+  StringRef getPassName() const override { return "Legalizer"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::Legalized);
+  }
+
+  bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
+                       const TargetInstrInfo &TII);
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
new file mode 100644
index 0000000..8bd8a9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -0,0 +1,115 @@
+//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file A pass to convert the target-illegal operations created by IR -> MIR
+/// translation into ones the target expects to be able to select. This may
+/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
+/// G_ADD <4 x i16>.
+///
+/// The LegalizerHelper class is where most of the work happens, and is
+/// designed to be callable from other passes that find themselves with an
+/// illegal instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+// Forward declarations.
+class LegalizerInfo;
+class Legalizer;
+class MachineRegisterInfo;
+
+class LegalizerHelper {
+public:
+  enum LegalizeResult {
+    /// Instruction was already legal and no change was made to the
+    /// MachineFunction.
+    AlreadyLegal,
+
+    /// Instruction has been legalized and the MachineFunction changed.
+    Legalized,
+
+    /// Some kind of error has occurred and we could not legalize this
+    /// instruction.
+    UnableToLegalize,
+  };
+
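+  // A typical driver loop over these results looks like this (a sketch, not
+  // the actual Legalizer pass):
+  //   LegalizerHelper Helper(MF);
+  //   switch (Helper.legalizeInstrStep(MI)) {
+  //   case LegalizerHelper::AlreadyLegal:     /* nothing to do */    break;
+  //   case LegalizerHelper::Legalized:        /* MI was rewritten */ break;
+  //   case LegalizerHelper::UnableToLegalize: /* report an error */  break;
+  //   }
+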
+  LegalizerHelper(MachineFunction &MF);
+
+  /// Replace \p MI by a sequence of legal instructions that can implement the
+  /// same operation. Note that this means \p MI may be deleted, so any iterator
+  /// steps should be performed before calling this function. This helper must
+  /// have been constructed with the MachineFunction containing \p MI.
+  ///
+  /// Considered as an opaque blob, the legal code will use and define the same
+  /// registers as \p MI.
+  LegalizeResult legalizeInstrStep(MachineInstr &MI);
+
+  /// Legalize an instruction by emitting a runtime library call instead.
+  LegalizeResult libcall(MachineInstr &MI);
+
+  /// Legalize an instruction by reducing the width of the underlying scalar
+  /// type.
+  LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+
+  /// Legalize an instruction by performing the operation on a wider scalar
+  /// type (for example a 16-bit addition can be safely performed at 32-bit
+  /// precision, ignoring the unused bits).
+  LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+
+  /// Legalize an instruction by splitting it into simpler parts, hopefully
+  /// understood by the target.
+  LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
+  /// Legalize a vector instruction by splitting into multiple components, each
+  /// acting on the same scalar type as the original but with fewer elements.
+  LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                     LLT NarrowTy);
+
+  /// Legalize a vector instruction by increasing the number of vector elements
+  /// involved and ignoring the added elements later.
+  LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
+                                    LLT WideTy);
+
+  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
+  /// functions.
+  MachineIRBuilder MIRBuilder;
+
+  /// Expose LegalizerInfo so clients can reuse it.
+  const LegalizerInfo &getLegalizerInfo() const { return LI; }
+
+private:
+
+  /// Helper function to split a wide generic register into bitwise blocks with
+  /// the given Type (which implies the number of blocks needed). The generic
+  /// registers created are appended to Ops, starting at bit 0 of Reg.
+  void extractParts(unsigned Reg, LLT Ty, int NumParts,
+                    SmallVectorImpl<unsigned> &Ops);
+
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+};
+
+/// Helper function that creates the given libcall.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+              const CallLowering::ArgInfo &Result,
+              ArrayRef<CallLowering::ArgInfo> Args);
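+//
+// For example, lowering a 64-bit signed remainder could be done with (a
+// sketch; the registers and IR type are illustrative):
+//   Type *I64 = Type::getInt64Ty(Ctx);
+//   createLibcall(MIRBuilder, RTLIB::SREM_I64,
+//                 /*Result=*/{DstReg, I64},
+//                 /*Args=*/{{LHSReg, I64}, {RHSReg, I64}});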
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
new file mode 100644
index 0000000..117c791
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -0,0 +1,920 @@
+//===- llvm/CodeGen/GlobalISel/LegalizerInfo.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Interface for Targets to specify which operations they can successfully
+/// select and how the others should be expanded most efficiently.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cassert>
+#include <cstdint>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+
+namespace llvm {
+
+extern cl::opt<bool> DisableGISelLegalityCheck;
+
+class MachineInstr;
+class MachineIRBuilder;
+class MachineRegisterInfo;
+
+namespace LegalizeActions {
+enum LegalizeAction : std::uint8_t {
+  /// The operation is expected to be selectable directly by the target, and
+  /// no transformation is necessary.
+  Legal,
+
+  /// The operation should be synthesized from multiple instructions acting on
+  /// a narrower scalar base-type. For example a 64-bit add might be
+  /// implemented in terms of 32-bit add-with-carry.
+  NarrowScalar,
+
+  /// The operation should be implemented in terms of a wider scalar
+  /// base-type. For example a <2 x s8> add could be implemented as a <2
+  /// x s32> add (ignoring the high bits).
+  WidenScalar,
+
+  /// The (vector) operation should be implemented by splitting it into
+  /// sub-vectors where the operation is legal. For example a <8 x s64> add
+  /// might be implemented as 4 separate <2 x s64> adds.
+  FewerElements,
+
+  /// The (vector) operation should be implemented by widening the input
+  /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
+  /// rarely legal, but you might perform an <8 x i8> operation and then only
+  /// look at the first two results.
+  MoreElements,
+
+  /// The operation itself must be expressed in terms of simpler actions on
+  /// this target. E.g. an SREM replaced by an SDIV, a multiply and a
+  /// subtraction.
+  Lower,
+
+  /// The operation should be implemented as a call to some kind of runtime
+  /// support library. For example this usually happens on machines that don't
+  /// support floating-point operations natively.
+  Libcall,
+
+  /// The target wants to do something special with this combination of
+  /// operand and type. A callback will be issued when it is needed.
+  Custom,
+
+  /// This operation is completely unsupported on the target. A programming
+  /// error has occurred.
+  Unsupported,
+
+  /// Sentinel value for when no action was found in the specified table.
+  NotFound,
+
+  /// Fall back onto the old rules.
+  /// TODO: Remove this once we've migrated
+  UseLegacyRules,
+};
+} // end namespace LegalizeActions
+
+using LegalizeActions::LegalizeAction;
+
+/// Legalization is decided based on an instruction's opcode, which type slot
+/// we're considering, and what the existing type is. These aspects are gathered
+/// together for convenience in the InstrAspect class.
+struct InstrAspect {
+  unsigned Opcode;
+  unsigned Idx = 0;
+  LLT Type;
+
+  InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Type(Type) {}
+  InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
+      : Opcode(Opcode), Idx(Idx), Type(Type) {}
+
+  bool operator==(const InstrAspect &RHS) const {
+    return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
+  }
+};
+
+/// The LegalityQuery object bundles together all the information that's needed
+/// to decide whether a given operation is legal or not.
+/// For efficiency, it doesn't make a copy of Types, so care must be taken not
+/// to free the underlying storage before using the query.
+struct LegalityQuery {
+  unsigned Opcode;
+  ArrayRef<LLT> Types;
+
+  raw_ostream &print(raw_ostream &OS) const;
+};
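+
+// For example, {TargetOpcode::G_ADD, {LLT::scalar(64)}} queries how a 64-bit
+// scalar addition should be handled (illustrative; such queries are consumed
+// by LegalizerInfo::getAction).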
+
+/// The result of a query. It either indicates a final answer of Legal or
+/// Unsupported or describes an action that must be taken to make an operation
+/// more legal.
+struct LegalizeActionStep {
+  /// The action to take or the final answer.
+  LegalizeAction Action;
+  /// If describing an action, the type index to change. Otherwise zero.
+  unsigned TypeIdx;
+  /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
+  LLT NewType;
+
+  LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
+                     const LLT &NewType)
+      : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}
+
+  bool operator==(const LegalizeActionStep &RHS) const {
+    return std::tie(Action, TypeIdx, NewType) ==
+        std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
+  }
+};
+
+using LegalityPredicate = std::function<bool (const LegalityQuery &)>;
+using LegalizeMutation =
+    std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
+
+namespace LegalityPredicates {
+/// True iff P0 and P1 are true.
+LegalityPredicate all(LegalityPredicate P0, LegalityPredicate P1);
+/// True iff the given type index is one of the specified types.
+LegalityPredicate typeInSet(unsigned TypeIdx,
+                            std::initializer_list<LLT> TypesInit);
+/// True iff the given types for the given pair of type indexes are one of the
+/// specified type pairs.
+LegalityPredicate
+typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
+              std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the specified type index is a scalar.
+LegalityPredicate isScalar(unsigned TypeIdx);
+/// True iff the specified type index is a scalar that's narrower than the given
+/// size.
+LegalityPredicate narrowerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar that's wider than the given
+/// size.
+LegalityPredicate widerThan(unsigned TypeIdx, unsigned Size);
+/// True iff the specified type index is a scalar whose size is not a power of
+/// 2.
+LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+/// True iff the specified type index is a vector whose element count is not a
+/// power of 2.
+LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+} // end namespace LegalityPredicates
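+//
+// Predicates compose with all(); for example (a sketch only):
+//   LegalityPredicate P =
+//       all(LegalityPredicates::typeInSet(0, {LLT::scalar(32)}),
+//           LegalityPredicates::isScalar(1));
+// holds when type index 0 is s32 and type index 1 is any scalar.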
+
+namespace LegalizeMutations {
+/// Select this specific type for the given type index.
+LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+/// Widen the type for the given type index to the next power of 2.
+LegalizeMutation widenScalarToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+/// Add more elements to the type for the given type index to the next power of
+/// 2.
+LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min = 0);
+} // end namespace LegalizeMutations
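+//
+// A mutation picks the replacement type; for example (a sketch only):
+//   LegalizeMutation M = LegalizeMutations::widenScalarToNextPow2(0, 32);
+// requests that type index 0 be widened to the next power of 2, and to at
+// least 32 bits.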
+
+/// A single rule in a legalizer info ruleset.
+/// The specified action is chosen when the predicate is true. Where appropriate
+/// for the action (e.g. for WidenScalar) the new type is selected using the
+/// given mutator.
+class LegalizeRule {
+  LegalityPredicate Predicate;
+  LegalizeAction Action;
+  LegalizeMutation Mutation;
+
+public:
+  LegalizeRule(LegalityPredicate Predicate, LegalizeAction Action,
+               LegalizeMutation Mutation = nullptr)
+      : Predicate(Predicate), Action(Action), Mutation(Mutation) {}
+
+  /// Test whether the LegalityQuery matches.
+  bool match(const LegalityQuery &Query) const {
+    return Predicate(Query);
+  }
+
+  LegalizeAction getAction() const { return Action; }
+
+  /// Determine the change to make.
+  std::pair<unsigned, LLT> determineMutation(const LegalityQuery &Query) const {
+    if (Mutation)
+      return Mutation(Query);
+    return std::make_pair(0, LLT{});
+  }
+};
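+//
+// Rules are normally created through the LegalizeRuleSet builder below; a
+// direct construction would look like (a sketch only):
+//   LegalizeRule R(LegalityPredicates::isScalar(0), LegalizeAction::Legal);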
+
+class LegalizeRuleSet {
+  /// When non-zero, the opcode we are an alias of
+  unsigned AliasOf;
+  /// If true, there is another opcode that aliases this one
+  bool IsAliasedByAnother;
+  SmallVector<LegalizeRule, 2> Rules;
+
+  void add(const LegalizeRule &Rule) {
+    assert(AliasOf == 0 &&
+           "RuleSet is aliased, change the representative opcode instead");
+    Rules.push_back(Rule);
+  }
+
+  static bool always(const LegalityQuery &) { return true; }
+
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action,
+                            LegalityPredicate Predicate) {
+    add({Predicate, Action});
+    return *this;
+  }
+  /// Use the given action when the predicate is true.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
+                            LegalizeMutation Mutation) {
+    add({Predicate, Action, Mutation});
+    return *this;
+  }
+  /// Use the given action when type index 0 is any type in the given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typeInSet(0, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 form any type pair in the
+  /// given list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionFor(LegalizeAction Action,
+            std::initializer_list<std::pair<LLT, LLT>> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typePairInSet(0, 1, Types));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in the given list.
+  /// That is, the type pair is in the cartesian product of the list.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
+                                             std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types), typeInSet(1, Types)));
+  }
+  /// Use the given action when type indexes 0 and 1 are both in their
+  /// respective lists.
+  /// That is, the type pair is in the cartesian product of the lists.
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &
+  actionForCartesianProduct(LegalizeAction Action,
+                            std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(0, Types0), typeInSet(1, Types1)));
+  }
+
+public:
+  LegalizeRuleSet() : AliasOf(0), IsAliasedByAnother(false), Rules() {}
+
+  bool isAliasedByAnother() { return IsAliasedByAnother; }
+  void setIsAliasedByAnother() { IsAliasedByAnother = true; }
+  void aliasTo(unsigned Opcode) {
+    assert((AliasOf == 0 || AliasOf == Opcode) &&
+           "Opcode is already aliased to another opcode");
+    assert(Rules.empty() && "Aliasing will discard rules");
+    AliasOf = Opcode;
+  }
+  unsigned getAlias() const { return AliasOf; }
+
+  /// The instruction is legal if predicate is true.
+  LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Legal, Predicate);
+  }
+  /// The instruction is legal when type index 0 is any type in the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 form any type pair in
+  /// the given list.
+  LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in the given
+  /// list. That is, the type pair is in the cartesian product of the list.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types);
+  }
+  /// The instruction is legal when type indexes 0 and 1 are both in their
+  /// respective lists.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
+  }
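+
+  // As an illustrative sketch (the opcode and the types a real target marks
+  // legal will differ), these calls are typically chained when building a
+  // target's ruleset:
+  //   getActionDefinitionsBuilder(TargetOpcode::G_ADD)
+  //       .legalFor({LLT::scalar(32), LLT::scalar(64)})
+  //       .clampScalar(0, LLT::scalar(32), LLT::scalar(64))
+  //       .widenScalarToNextPow2(0);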
+
+  /// Like legalIf, but for the Libcall action.
+  LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Libcall, Predicate);
+  }
+  LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types);
+  }
+  LegalizeRuleSet &
+  libcallForCartesianProduct(std::initializer_list<LLT> Types0,
+                             std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Libcall, Types0, Types1);
+  }
+
+  /// Widen the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
+                                 LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
+  }
+  /// Narrow the scalar to the one selected by the mutation if the predicate is
+  /// true.
+  LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
+  }
+
+  /// Add more elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
+                                  LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
+  }
+  /// Remove elements to reach the type selected by the mutation if the
+  /// predicate is true.
+  LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
+                                   LegalizeMutation Mutation) {
+    return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
+  }
+
+  /// The instruction is unsupported.
+  LegalizeRuleSet &unsupported() {
+    return actionIf(LegalizeAction::Unsupported, always);
+  }
+  LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Unsupported, Predicate);
+  }
+
+  LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+    return actionIf(LegalizeAction::Custom, Predicate);
+  }
+  LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types);
+  }
+  LegalizeRuleSet &
+  customForCartesianProduct(std::initializer_list<LLT> Types0,
+                            std::initializer_list<LLT> Types1) {
+    return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
+  }
+
+  /// Widen the scalar to the next power of two that is at least MinSize.
+  /// No effect if the type is not a scalar or its size is already a power of
+  /// two.
+  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize = 0) {
+    using namespace LegalityPredicates;
+    return widenScalarIf(
+        sizeNotPow2(TypeIdx),
+        LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
+  }
+
+  LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
+    using namespace LegalityPredicates;
+    return narrowScalarIf(isScalar(TypeIdx), Mutation);
+  }
+
+  /// Ensure the scalar is at least as wide as Ty.
+  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return widenScalarIf(narrowerThan(TypeIdx, Ty.getSizeInBits()),
+                         changeTo(TypeIdx, Ty));
+  }
+
+  /// Ensure the scalar is at most as wide as Ty.
+  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(widerThan(TypeIdx, Ty.getSizeInBits()),
+                          changeTo(TypeIdx, Ty));
+  }
+
+  /// Conditionally limit the maximum size of the scalar.
+  /// For example, when the maximum size of one type depends on the size of
+  /// another such as extracting N bits from an M bit container.
+  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return narrowScalarIf(
+        [=](const LegalityQuery &Query) {
+          // Invoke the size predicate on the query; note that the bare
+          // LegalityPredicate object would otherwise implicitly convert to
+          // true and silently drop the size check.
+          return widerThan(TypeIdx, Ty.getSizeInBits())(Query) &&
+                 Predicate(Query);
+        },
+        changeTo(TypeIdx, Ty));
+  }
+
+  /// Limit the range of scalar sizes to MinTy and MaxTy.
+  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy, const LLT &MaxTy) {
+    assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
+
+    return minScalar(TypeIdx, MinTy)
+        .maxScalar(TypeIdx, MaxTy);
+  }
+
+  /// Add more elements to the vector to reach the next power of two.
+  /// No effect if the type is not a vector or the element count is a power of
+  /// two.
+  LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
+    using namespace LegalityPredicates;
+    return moreElementsIf(numElementsNotPow2(TypeIdx),
+                          LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+  }
+
+  /// Ensure the number of elements in EltTy vectors is at least MinElements.
+  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MinElements) {
+    return moreElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() < MinElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MinElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements in EltTy vectors to at most MaxElements.
+  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
+                                       unsigned MaxElements) {
+    return fewerElementsIf(
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return VecTy.getElementType() == EltTy &&
+                 VecTy.getNumElements() > MaxElements;
+        },
+        [=](const LegalityQuery &Query) {
+          LLT VecTy = Query.Types[TypeIdx];
+          return std::make_pair(
+              TypeIdx, LLT::vector(MaxElements, VecTy.getScalarSizeInBits()));
+        });
+  }
+  /// Limit the number of elements for the given vectors to at least MinTy's
+  /// number of elements and at most MaxTy's number of elements.
+  ///
+  /// No effect if the type is not a vector or does not have the same element
+  /// type as the constraints.
+  /// The element type of MinTy and MaxTy must match.
+  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT &MinTy,
+                                    const LLT &MaxTy) {
+    assert(MinTy.getElementType() == MaxTy.getElementType() &&
+           "Expected element types to agree");
+
+    const LLT &EltTy = MinTy.getElementType();
+    return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
+        .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
+  }
+
+  /// Fall back on the previous implementation. This should only be used while
+  /// porting a rule.
+  LegalizeRuleSet &fallback() {
+    add({always, LegalizeAction::UseLegacyRules});
+    return *this;
+  }
+
+  /// Apply the ruleset to the given LegalityQuery.
+  LegalizeActionStep apply(const LegalityQuery &Query) const;
+};
+
+class LegalizerInfo {
+public:
+  LegalizerInfo();
+  virtual ~LegalizerInfo() = default;
+
+  unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;
+  unsigned getActionDefinitionsIdx(unsigned Opcode) const;
+
+  /// Compute any ancillary tables needed to quickly decide how an operation
+  /// should be handled. This must be called after all "set*Action" methods,
+  /// but before any query is made, or incorrect results may be returned.
+  void computeTables();
+
+  static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
+    using namespace LegalizeActions;
+    switch (Action) {
+    case NarrowScalar:
+    case WidenScalar:
+    case FewerElements:
+    case MoreElements:
+    case Unsupported:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  using SizeAndAction = std::pair<uint16_t, LegalizeAction>;
+  using SizeAndActionsVec = std::vector<SizeAndAction>;
+  using SizeChangeStrategy =
+      std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;
+
+  /// More friendly way to set an action for common types that have an LLT
+  /// representation.
+  /// The LegalizeAction must be one for which needsLegalizingToDifferentSize
+  /// returns false.
+  void setAction(const InstrAspect &Aspect, LegalizeAction Action) {
+    assert(!needsLegalizingToDifferentSize(Action));
+    TablesInitialized = false;
+    const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
+    if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
+      SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
+    SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
+  }
+
+  /// The setAction calls record the non-size-changing legalization actions
+  /// to take on specifically-sized types. The SizeChangeStrategy defines what
+  /// to do when the size of the type needs to be changed to reach a legally
+  /// sized type (i.e., one that was defined through a setAction call).
+  /// e.g.
+  /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
+  /// setLegalizeScalarToDifferentSizeStrategy(
+  ///   G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
+  /// will end up defining getAction({G_ADD, 0, T}) to return the following 
+  /// actions for different scalar types T:
+  ///  LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
+  ///  LLT::scalar(32):                 {Legal, 0, LLT::scalar(32)}
+  ///  LLT::scalar(33)..:               {NarrowScalar, 0, LLT::scalar(32)}
+  ///
+  /// If no SizeChangeStrategy gets defined through this function, the default
+  /// is unsupportedForDifferentSizes.
+  void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
+                                                const unsigned TypeIdx,
+                                                SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// See also setLegalizeScalarToDifferentSizeStrategy.
+  /// This function allows setting the SizeChangeStrategy for vector elements.
+  void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
+                                                       const unsigned TypeIdx,
+                                                       SizeChangeStrategy S) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
+      VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
+    VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a 
+  /// particular operation consists of only supporting a specific set of type
+  /// sizes. E.g.
+  ///   setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
+  ///   setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
+  ///   setLegalizeScalarToDifferentSizeStrategy(
+  ///     G_DIV, 0, unsupportedForDifferentSizes);
+  /// will result in getAction({G_DIV, 0, T}) returning Legal for s32 and s64,
+  /// and Unsupported for all other scalar types T.
+  static SizeAndActionsVec
+  unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
+                                                     Unsupported);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular operation consists of widening the type to a larger legal
+  /// type, or, if no larger legal type exists, narrowing it to the largest
+  /// legal type.
+  static SizeAndActionsVec
+  widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     NarrowScalar);
+  }
+
+  static SizeAndActionsVec
+  widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
+                                                     Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       Unsupported);
+  }
+
+  static SizeAndActionsVec
+  narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    assert(v.size() > 0 &&
+           "At least one size that can be legalized towards is needed"
+           " for this SizeChangeStrategy");
+    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
+                                                       WidenScalar);
+  }
+
+  /// A SizeChangeStrategy for the common case where legalization for a
+  /// particular vector operation consists of increasing the number of vector
+  /// elements until a legal type is reached, or, if no such type exists,
+  /// legalizing towards the widest vector that is still legal. E.g.
+  ///   setAction({G_ADD, LLT::vector(8, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(16, 8)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(2, 32)}, Legal);
+  ///   setAction({G_ADD, LLT::vector(4, 32)}, Legal);
+  ///   setLegalizeVectorElementToDifferentSizeStrategy(
+  ///     G_ADD, 0, moreToWiderTypesAndLessToWidest);
+  /// will result in the following getAction results:
+  ///   * getAction({G_ADD, LLT::vector(8,8)}) returns
+  ///       (Legal, vector(8,8)).
+  ///   * getAction({G_ADD, LLT::vector(9,8)}) returns
+  ///       (MoreElements, vector(16,8)).
+  ///   * getAction({G_ADD, LLT::vector(8,32)}) returns
+  ///       (FewerElements, vector(4,32)).
+  static SizeAndActionsVec
+  moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
+    using namespace LegalizeActions;
+    return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
+                                                     FewerElements);
+  }
+
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  increaseToLargerTypesAndDecreaseToLargest(const SizeAndActionsVec &v,
+                                            LegalizeAction IncreaseAction,
+                                            LegalizeAction DecreaseAction);
+  /// Helper function to implement many typical SizeChangeStrategy functions.
+  static SizeAndActionsVec
+  decreaseToSmallerTypesAndIncreaseToSmallest(const SizeAndActionsVec &v,
+                                              LegalizeAction DecreaseAction,
+                                              LegalizeAction IncreaseAction);
+
+  /// Get the action definitions for the given opcode. Use this to run a
+  /// LegalityQuery through the definitions.
+  const LegalizeRuleSet &getActionDefinitions(unsigned Opcode) const;
+
+  /// Get the action definition builder for the given opcode. Use this to define
+  /// the action definitions.
+  ///
+  /// It is an error to request an opcode that has already been requested by the
+  /// multiple-opcode variant.
+  LegalizeRuleSet &getActionDefinitionsBuilder(unsigned Opcode);
+
+  /// Get the action definition builder for the given set of opcodes. Use this
+  /// to define the action definitions for multiple opcodes at once. The first
+  /// opcode given will be considered the representative opcode and will hold
+  /// the definitions whereas the other opcodes will be configured to refer to
+  /// the representative opcode. This lowers memory requirements and very
+  /// slightly improves performance.
+  ///
+  /// It would be very easy to introduce unexpected side-effects as a result of
+  /// this aliasing if it were permitted to request different but intersecting
+  /// sets of opcodes but that is difficult to keep track of. It is therefore an
+  /// error to request the same opcode twice using this API, to request an
+  /// opcode that already has definitions, or to use the single-opcode API on an
+  /// opcode that has already been requested by this API.
+  LegalizeRuleSet &
+  getActionDefinitionsBuilder(std::initializer_list<unsigned> Opcodes);
+  void aliasActionDefinitions(unsigned OpcodeTo, unsigned OpcodeFrom);
+
+  /// Determine what action should be taken to legalize the described
+  /// instruction. Requires computeTables to have been called.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const LegalityQuery &Query) const;
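+
+  // For example (an illustrative sketch; LI is a placeholder LegalizerInfo
+  // and the step returned depends entirely on the rules the target installed):
+  //   LegalizeActionStep Step =
+  //       LI.getAction({TargetOpcode::G_ADD, {LLT::scalar(8)}});
+  //   // Step might then be {WidenScalar, 0, LLT::scalar(32)}.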
+
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction.
+  ///
+  /// \returns a description of the next legalization step to perform.
+  LegalizeActionStep getAction(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI) const;
+
+  bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+
+  virtual bool legalizeCustom(MachineInstr &MI,
+                              MachineRegisterInfo &MRI,
+                              MachineIRBuilder &MIRBuilder) const;
+
+private:
+  /// Determine what action should be taken to legalize the given generic
+  /// instruction opcode, type-index and type. Requires computeTables to have
+  /// been called.
+  ///
+  /// \returns a pair consisting of the kind of legalization that should be
+  /// performed and the destination type.
+  std::pair<LegalizeAction, LLT>
+  getAspectAction(const InstrAspect &Aspect) const;
+
+  /// The SizeAndActionsVec is a representation mapping between all natural
+  /// numbers and an Action. The natural number represents the bit size of
+  /// the InstrAspect. For example, for a target with native support for 32-bit
+  /// and 64-bit additions, you'd express that as:
+  /// setScalarAction(G_ADD, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Legal},       // bit sizes [32, 33[
+  ///            {33, WidenScalar}, // bit sizes [33, 64[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, NarrowScalar} // bit sizes [65, +inf[
+  ///           });
+  /// It may be that only 64-bit pointers are supported on your target:
+  /// setPointerAction(G_GEP, 0, LLT::pointer(1),
+  ///           {{1, Unsupported},  // bit sizes [ 1, 63[
+  ///            {64, Legal},       // bit sizes [64, 65[
+  ///            {65, Unsupported}, // bit sizes [65, +inf[
+  ///           });
+  void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
+                       const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+  void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
+                        const unsigned AddressSpace,
+                        const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
+        AddrSpace2PointerActions[OpcodeIdx].end())
+      AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// If an operation on a given vector type (say <M x iN>) isn't explicitly
+  /// specified, we proceed in 2 stages. First we legalize the underlying scalar
+  /// (so that there's at least one legal vector with that scalar), then we
+  /// adjust the number of elements in the vector so that it is legal. The
+  /// desired action in the first step is controlled by this function.
+  void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
+                               const SizeAndActionsVec &SizeAndActions) {
+    unsigned OpcodeIdx = Opcode - FirstOp;
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        ScalarInVectorActions[OpcodeIdx];
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// See also setScalarInVectorAction.
+  /// This function lets you specify the number of elements in a vector that
+  /// are legal for a legal element size.
+  void setVectorNumElementAction(const unsigned Opcode,
+                                 const unsigned TypeIndex,
+                                 const unsigned ElementSize,
+                                 const SizeAndActionsVec &SizeAndActions) {
+    const unsigned OpcodeIdx = Opcode - FirstOp;
+    if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
+        NumElements2Actions[OpcodeIdx].end())
+      NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
+    SmallVector<SizeAndActionsVec, 1> &Actions =
+        NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
+    setActions(TypeIndex, Actions, SizeAndActions);
+  }
+
+  /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
+  /// i.e. it's OK if it doesn't start from size 1.
+  static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
+    using namespace LegalizeActions;
+#ifndef NDEBUG
+    // The sizes should be in increasing order
+    int prev_size = -1;
+    for(auto SizeAndAction: v) {
+      assert(SizeAndAction.first > prev_size);
+      prev_size = SizeAndAction.first;
+    }
+    // - for every Widen action, there should be a larger bitsize that
+    //   can be legalized towards (e.g. Legal, Lower, Libcall or Custom
+    //   action).
+    // - for every Narrow action, there should be a smaller bitsize that
+    //   can be legalized towards.
+    int SmallestNarrowIdx = -1;
+    int LargestWidenIdx = -1;
+    int SmallestLegalizableToSameSizeIdx = -1;
+    int LargestLegalizableToSameSizeIdx = -1;
+    for(size_t i=0; i<v.size(); ++i) {
+      switch (v[i].second) {
+        case FewerElements:
+        case NarrowScalar:
+          if (SmallestNarrowIdx == -1)
+            SmallestNarrowIdx = i;
+          break;
+        case WidenScalar:
+        case MoreElements:
+          LargestWidenIdx = i;
+          break;
+        case Unsupported:
+          break;
+        default:
+          if (SmallestLegalizableToSameSizeIdx == -1)
+            SmallestLegalizableToSameSizeIdx = i;
+          LargestLegalizableToSameSizeIdx = i;
+      }
+    }
+    if (SmallestNarrowIdx != -1) {
+      assert(SmallestLegalizableToSameSizeIdx != -1);
+      assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
+    }
+    if (LargestWidenIdx != -1)
+      assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
+#endif
+  }
+
+  /// A full SizeAndActionsVec must cover all bit sizes, i.e. it must start
+  /// from size 1.
+  static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
+#ifndef NDEBUG
+    // Data structure invariant: The first bit size must be size 1.
+    assert(v.size() >= 1);
+    assert(v[0].first == 1);
+    checkPartialSizeAndActionsVector(v);
+#endif
+  }
+
+  /// Sets actions for all bit sizes on a particular generic opcode, type
+  /// index and scalar or pointer type.
+  void setActions(unsigned TypeIndex,
+                  SmallVector<SizeAndActionsVec, 1> &Actions,
+                  const SizeAndActionsVec &SizeAndActions) {
+    checkFullSizeAndActionsVector(SizeAndActions);
+    if (Actions.size() <= TypeIndex)
+      Actions.resize(TypeIndex + 1);
+    Actions[TypeIndex] = SizeAndActions;
+  }
+
+  static SizeAndAction findAction(const SizeAndActionsVec &Vec,
+                                  const uint32_t Size);
+
+  /// Returns the next action needed to get the scalar or pointer type closer
+  /// to being legal.
+  /// E.g. findLegalAction({G_REM, 13}) should return
+  /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will
+  /// probably be called, which should return (Lower, 32).
+  /// This is assuming the setScalarAction on G_REM was something like:
+  /// setScalarAction(G_REM, 0,
+  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
+  ///            {32, Lower},       // bit sizes [32, 33[
+  ///            {33, NarrowScalar} // bit sizes [33, +inf[
+  ///           });
+  std::pair<LegalizeAction, LLT>
+  findScalarLegalAction(const InstrAspect &Aspect) const;
+
+  /// Returns the next action needed towards legalizing the vector type.
+  std::pair<LegalizeAction, LLT>
+  findVectorLegalAction(const InstrAspect &Aspect) const;
+
+  static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
+  static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
+
+  // Data structures used temporarily during construction of legality data:
+  using TypeMap = DenseMap<LLT, LegalizeAction>;
+  SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
+  SmallVector<SizeChangeStrategy, 1>
+      VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
+  bool TablesInitialized;
+
+  // Data structures used by getAction:
+  SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
+  SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      AddrSpace2PointerActions[LastOp - FirstOp + 1];
+  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
+      NumElements2Actions[LastOp - FirstOp + 1];
+
+  LegalizeRuleSet RulesForOpcode[LastOp - FirstOp + 1];
+};
+
+#ifndef NDEBUG
+/// Checks that MIR is fully legal, returns an illegal instruction if it's not,
+/// nullptr otherwise.
+const MachineInstr *machineFunctionIsIllegal(const MachineFunction &MF);
+#endif
+
+} // end namespace llvm.
+
+#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
new file mode 100644
index 0000000..0a46eb9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -0,0 +1,78 @@
+//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the Localizer pass.
+/// This pass moves/duplicates constant-like instructions close to their uses.
+/// Its primary goal is to work around the deficiencies of the fast register
+/// allocator.
+/// With GlobalISel, constants are all materialized in the entry block of
+/// a function. However, the fast allocator cannot rematerialize constants and
+/// has a lot more live-ranges to deal with and will most likely end up
+/// spilling a lot.
+/// By pushing the constants close to their use, we only create small
+/// live-ranges.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+// Forward declarations.
+class MachineRegisterInfo;
+
+/// This pass implements the localization mechanism described at the
+/// top of this file. One particularity of the implementation is that
+/// it will materialize one and only one instance of a constant per
+/// basic block, thus enabling reuse of that constant within that block.
+/// Moreover, it only materializes constants in blocks where they
+/// are used. PHI uses are considered to happen at the end of the
+/// related predecessor block.
+class Localizer : public MachineFunctionPass {
+public:
+  static char ID;
+
+private:
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI;
+
+  /// Check whether or not \p MI needs to be moved close to its uses.
+  static bool shouldLocalize(const MachineInstr &MI);
+
+  /// Check if \p MOUse is used in the same basic block as \p Def.
+  /// If the use is in the same block, we say it is local.
+  /// When the use is not local, \p InsertMBB will contain the basic
+  /// block in which to insert \p Def to have a local use.
+  static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
+                         MachineBasicBlock *&InsertMBB);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+public:
+  Localizer();
+
+  StringRef getPassName() const override { return "Localizer"; }
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized)
+        .set(MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
new file mode 100644
index 0000000..797f5e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -0,0 +1,327 @@
+//== ----- llvm/CodeGen/GlobalISel/MIPatternMatch.h --------------------- == //
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// Contains matchers for matching SSA Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_GMIR_PATTERNMATCH_H
+#define LLVM_GMIR_PATTERNMATCH_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+namespace MIPatternMatch {
+
+template <typename Reg, typename Pattern>
+bool mi_match(Reg R, MachineRegisterInfo &MRI, Pattern &&P) {
+  return P.match(MRI, R);
+}
+
+// TODO: Extend for N uses.
+template <typename SubPatternT> struct OneUse_match {
+  SubPatternT SubPat;
+  OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
+  }
+};
+
+template <typename SubPat>
+inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
+  return SP;
+}
+
+struct ConstantMatch {
+  int64_t &CR;
+  ConstantMatch(int64_t &C) : CR(C) {}
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+    if (auto MaybeCst = getConstantVRegVal(Reg, MRI)) {
+      CR = *MaybeCst;
+      return true;
+    }
+    return false;
+  }
+};
+
+inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+
+// TODO: Rework this for different kinds of MachineOperand.
+// Currently assumes the Src for a match is a register.
+// We might want to support taking in some MachineOperands and calling getReg
+// on them.
+
+struct operand_type_match {
+  bool match(const MachineRegisterInfo &MRI, unsigned Reg) { return true; }
+  bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
+    return MO->isReg();
+  }
+};
+
+inline operand_type_match m_Reg() { return operand_type_match(); }
+
+/// Matching combinators.
+template <typename... Preds> struct And {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return true;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct And<Pred, Preds...> : And<Preds...> {
+  Pred P;
+  And(Pred &&p, Preds &&... preds)
+      : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
+  }
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) && And<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> struct Or {
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return false;
+  }
+};
+
+template <typename Pred, typename... Preds>
+struct Or<Pred, Preds...> : Or<Preds...> {
+  Pred P;
+  Or(Pred &&p, Preds &&... preds)
+      : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
+  template <typename MatchSrc>
+  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+    return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
+  }
+};
+
+template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
+  return And<Preds...>(std::forward<Preds>(preds)...);
+}
+
+template <typename... Preds> Or<Preds...> m_any_of(Preds &&... preds) {
+  return Or<Preds...>(std::forward<Preds>(preds)...);
+}
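+
+// Illustrative combined use (a sketch; Reg and MRI are placeholders): match a
+// register defined either by a G_TRUNC or by a G_SEXT, binding the source
+// register either way (the unary matchers are declared further below):
+//   unsigned Src;
+//   bool Matched = mi_match(
+//       Reg, MRI, m_any_of(m_GTrunc(m_Reg(Src)), m_GSExt(m_Reg(Src))));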
+
+template <typename BindTy> struct bind_helper {
+  static bool bind(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
+    VR = V;
+    return true;
+  }
+};
+
+template <> struct bind_helper<MachineInstr *> {
+  static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
+                   unsigned Reg) {
+    MI = MRI.getVRegDef(Reg);
+    if (MI)
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<LLT> {
+  static bool bind(const MachineRegisterInfo &MRI, LLT &Ty, unsigned Reg) {
+    Ty = MRI.getType(Reg);
+    if (Ty.isValid())
+      return true;
+    return false;
+  }
+};
+
+template <> struct bind_helper<const ConstantFP *> {
+  static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
+                   unsigned Reg) {
+    F = getConstantFPVRegVal(Reg, MRI);
+    if (F)
+      return true;
+    return false;
+  }
+};
+
+template <typename Class> struct bind_ty {
+  Class &VR;
+
+  bind_ty(Class &V) : VR(V) {}
+
+  template <typename ITy> bool match(const MachineRegisterInfo &MRI, ITy &&V) {
+    return bind_helper<Class>::bind(MRI, VR, V);
+  }
+};
+
+inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
+inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
+inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
+
+// Helper for matching G_FCONSTANT
+inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }
+
+// General helper for all the binary generic MI such as G_ADD/G_SUB etc
+template <typename LHS_P, typename RHS_P, unsigned Opcode,
+          bool Commutable = false>
+struct BinaryOp_match {
+  LHS_P L;
+  RHS_P R;
+
+  BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
+        return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                R.match(MRI, TmpMI->getOperand(2).getReg())) ||
+               (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                               L.match(MRI, TmpMI->getOperand(2).getReg())));
+      }
+    }
+    return false;
+  }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>
+m_GAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>(L, R);
+}
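+
+// Illustrative use (a sketch; DstReg, SrcReg and MRI are placeholders):
+//   int64_t Cst;
+//   unsigned SrcReg;
+//   // Matches DstReg = G_ADD SrcReg, <constant> in either operand order,
+//   // since G_ADD is marked commutable above.
+//   if (mi_match(DstReg, MRI, m_GAdd(m_Reg(SrcReg), m_ICst(Cst)))) { ... }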
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
+                                                            const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>
+m_GMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>
+m_GFAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>
+m_GFMul(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
+m_GAnd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
+                                                                const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
+}
+
+// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
+template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
+  SrcTy L;
+
+  UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
+  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
+        return L.match(MRI, TmpMI->getOperand(1).getReg());
+      }
+    }
+    return false;
+  }
+};
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>
+m_GAnyExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_SEXT> m_GSExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_SEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT> m_GZExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT> m_GFPExt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC> m_GTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>
+m_GBitcast(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>
+m_GPtrToInt(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>
+m_GIntToPtr(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>
+m_GFPTrunc(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FABS> m_GFabs(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FABS>(Src);
+}
+
+template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
+}
+
+// Helper for checking if a Reg is of a specific type.
+struct CheckType {
+  LLT Ty;
+  CheckType(const LLT &Ty) : Ty(Ty) {}
+
+  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
+    return MRI.getType(Reg) == Ty;
+  }
+};
+
+inline CheckType m_SpecificType(LLT Ty) { return Ty; }
+
+} // namespace MIPatternMatch
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
new file mode 100644
index 0000000..ef4e0ad
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -0,0 +1,808 @@
+//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.h - MIBuilder --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the MachineIRBuilder class.
+/// This is a helper class to build MachineInstr.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
+
+#include "llvm/CodeGen/GlobalISel/Types.h"
+
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+
+
+namespace llvm {
+
+// Forward declarations.
+class MachineFunction;
+class MachineInstr;
+class TargetInstrInfo;
+
+/// Helper class to build MachineInstr.
+/// It internally keeps the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilder {
+  /// MachineFunction under construction.
+  MachineFunction *MF;
+  /// Information used to access the description of the opcodes.
+  const TargetInstrInfo *TII;
+  /// Information used to verify types are consistent and to create virtual registers.
+  MachineRegisterInfo *MRI;
+  /// Debug location to be set to any instruction we create.
+  DebugLoc DL;
+
+  /// \name Fields describing the insertion point.
+  /// @{
+  MachineBasicBlock *MBB;
+  MachineBasicBlock::iterator II;
+  /// @}
+
+  std::function<void(MachineInstr *)> InsertedInstr;
+
+  const TargetInstrInfo &getTII() {
+    assert(TII && "TargetInstrInfo is not set");
+    return *TII;
+  }
+
+  void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
+
+  unsigned getDestFromArg(unsigned Reg) { return Reg; }
+  unsigned getDestFromArg(LLT Ty) {
+    return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+  }
+  unsigned getDestFromArg(const TargetRegisterClass *RC) {
+    return getMF().getRegInfo().createVirtualRegister(RC);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB, unsigned Reg) {
+    MIB.addUse(Reg);
+  }
+
+  void addUseFromArg(MachineInstrBuilder &MIB, const MachineInstrBuilder &UseMIB) {
+    MIB.addUse(UseMIB->getOperand(0).getReg());
+  }
+
+  void addUsesFromArgs(MachineInstrBuilder &MIB) { }
+  template<typename UseArgTy, typename ... UseArgsTy>
+  void addUsesFromArgs(MachineInstrBuilder &MIB, UseArgTy &&Arg1, UseArgsTy &&... Args) {
+    addUseFromArg(MIB, Arg1);
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+  }
+  unsigned getRegFromArg(unsigned Reg) { return Reg; }
+  unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
+    return MIB->getOperand(0).getReg();
+  }
+
+public:
+  /// Some constructors for easy use.
+  MachineIRBuilder() = default;
+  MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
+  MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+    setInstr(MI);
+  }
+
+  /// Getter for the function we currently build.
+  MachineFunction &getMF() {
+    assert(MF && "MachineFunction is not set");
+    return *MF;
+  }
+
+  /// Getter for the basic block we currently build.
+  MachineBasicBlock &getMBB() {
+    assert(MBB && "MachineBasicBlock is not set");
+    return *MBB;
+  }
+
+  /// Current insertion point for new instructions.
+  MachineBasicBlock::iterator getInsertPt() {
+    return II;
+  }
+
+  /// Set the insertion point before the specified position.
+  /// \pre MBB must be in getMF().
+  /// \pre II must be a valid iterator in MBB.
+  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
+  /// @}
+
+  /// \name Setters for the insertion point.
+  /// @{
+  /// Set the MachineFunction where to build instructions.
+  void setMF(MachineFunction &);
+
+  /// Set the insertion point to the end of \p MBB.
+  /// \pre \p MBB must be contained by getMF().
+  void setMBB(MachineBasicBlock &MBB);
+
+  /// Set the insertion point to before MI.
+  /// \pre MI must be in getMF().
+  void setInstr(MachineInstr &MI);
+  /// @}
+
+  /// \name Control where instructions we create are recorded (typically for
+  /// visiting again later during legalization).
+  /// @{
+  void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
+  void stopRecordingInsertions();
+  /// @}
+
+  /// Set the debug location to \p DL for all the next build instructions.
+  void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
+
+  /// Get the current instruction's debug location.
+  DebugLoc getDebugLoc() { return DL; }
+
+  /// Build and insert <empty> = \p Opcode <empty>.
+  /// The insertion point is the one set by the last call of either
+  /// setBasicBlock or setMI.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstr(unsigned Opcode);
+
+  /// DAG-like generic method for building arbitrary instructions as above.
+  /// \p Opc opcode for the instruction.
+  /// \p Ty either an LLT, a TargetRegisterClass, or an unsigned register for
+  /// the destination.
+  /// \p Args variadic list of uses of type unsigned/MachineInstrBuilder.
+  /// Uses of type MachineInstrBuilder will perform
+  /// getOperand(0).getReg() to convert to a register.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+                                 UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
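+
+  // For instance (an illustrative sketch; the registers and the opcode are
+  // placeholders), building %res:_(s32) = G_ADD %op0, %op1 into a fresh
+  // destination register:
+  //   auto AddMIB = MIRBuilder.buildInstr(TargetOpcode::G_ADD,
+  //                                       LLT::scalar(32), Op0Reg, Op1Reg);
+  //   unsigned ResReg = AddMIB->getOperand(0).getReg();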
+
+  /// Build but don't insert <empty> = \p Opcode <empty>.
+  ///
+  /// \pre setMF, setBasicBlock or setMI  must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);
+
+  /// Insert an existing instruction at the insertion point.
+  MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
+  MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
+                                          const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
+  /// Expr).
+  MachineInstrBuilder buildIndirectDbgValue(unsigned Reg,
+                                            const MDNode *Variable,
+                                            const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction expressing the fact that the
+  /// associated \p Variable lives in the stack slot specified by \p FI
+  /// (suitably modified by \p Expr).
+  MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable,
+                                      const MDNode *Expr);
+
+  /// Build and insert a DBG_VALUE instruction specifying that \p Variable is
+  /// given by \p C (suitably modified by \p Expr).
+  MachineInstrBuilder buildConstDbgValue(const Constant &C,
+                                         const MDNode *Variable,
+                                         const MDNode *Expr);
+
+  /// Build and insert \p Res = G_FRAME_INDEX \p Idx
+  ///
+  /// G_FRAME_INDEX materializes the address of an alloca value or other
+  /// stack-based object.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
+
+  /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
+  ///
+  /// G_GLOBAL_VALUE materializes the address of the specified global
+  /// into \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type
+  ///      in the same address space as \p GV.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
+
+  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+  ///
+  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+  /// truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildAdd(Res, (getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+  ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildSub(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+  ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = getDestFromArg(Ty);
+    return buildMul(Res, (getRegFromArg(UseArgs))...);
+  }
+  MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_GEP \p Op0, \p Op1
+  ///
+  /// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p Op1 must be a generic virtual register with scalar type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
+  ///
+  /// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res. If \p Value is zero then no
+  /// G_GEP or G_CONSTANT will be created and \p Op0 will be assigned to
+  /// \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Op0 must be a generic virtual register with pointer type.
+  /// \pre \p ValueTy must be a scalar type.
+  /// \pre \p Res must be 0. This is to detect confusion between
+  ///      materializeGEP() and buildGEP().
+  /// \post \p Res will either be a new generic virtual register of the same
+  ///       type as \p Op0 or \p Op0 itself.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
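+  ///
+  /// A minimal usage sketch (illustrative names; PtrReg is assumed to be a
+  /// pointer-typed register):
+  /// \code
+  /// unsigned NewPtr = 0; // Must start as 0; see the precondition above.
+  /// auto MIB = MIRBuilder.materializeGEP(NewPtr, PtrReg, LLT::scalar(64),
+  ///                                      /*Value=*/16);
+  /// // NewPtr now names either a fresh register or PtrReg itself.
+  /// \endcode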
+  Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+                                               const LLT &ValueTy,
+                                               uint64_t Value);
+
+  /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
+  ///
+  /// G_PTR_MASK clears the low bits of a pointer operand without destroying its
+  /// pointer properties. This has the effect of rounding the address *down* to
+  /// a specified alignment in bits.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
+  ///      type.
+  /// \pre \p NumBits must be an integer representing the number of low bits to
+  ///      be cleared in \p Op0.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
+                                   uint32_t NumBits);
+
+  /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
+  /// \p Op1, \p CarryIn
+  ///
+  /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
+  /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
+  /// arithmetic.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same scalar type.
+  /// \pre \p CarryOut and \p CarryIn must be generic virtual
+  ///      registers with the same scalar type (typically s1)
+  ///
+  /// \return The newly created instruction.
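+  ///
+  /// For illustration, a 64-bit addition done in two 32-bit halves could be
+  /// sketched as follows (hypothetical s32 registers and s1 carries):
+  /// \code
+  /// MIRBuilder.buildUAdde(ResLo, Carry, LHSLo, RHSLo, ZeroCarry);
+  /// MIRBuilder.buildUAdde(ResHi, CarryOut, LHSHi, RHSHi, Carry);
+  /// \endcode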
+  MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
+                                 unsigned Op1, unsigned CarryIn);
+
+  /// Build and insert \p Res = G_AND \p Op0, \p Op1
+  ///
+  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
+                               unsigned Op1);
+
+  /// Build and insert \p Res = G_OR \p Op0, \p Op1
+  ///
+  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
+  MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op0
+  ///
+  /// G_ANYEXT produces a register of the specified width, with its low bits
+  /// set to \p Op. The remaining bits are unspecified
+  /// (i.e. this is neither zero nor sign-extension). For a vector register,
+  /// each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildAnyExt(DstType &&Res, ArgType &&Arg) {
+    return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+
+  /// Build and insert \p Res = G_SEXT \p Op
+  ///
+  /// G_SEXT produces a register of the specified width, with its low bits
+  /// set to \p Op. The remaining bits are duplicated from the
+  /// high bit of \p Op (i.e. 2s-complement sign extended).
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildSExt(DstType &&Res, ArgType &&Arg) {
+    return buildSExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op
+  ///
+  /// G_ZEXT produces a register of the specified width, with its low bits
+  /// set to \p Op. The remaining bits are 0. For a vector
+  /// register, each element is extended individually.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be smaller than \p Res
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildZExt(DstType &&Res, ArgType &&Arg) {
+    return buildZExt(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildSExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildSExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildZExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildZExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstTy, typename UseArgTy>
+  MachineInstrBuilder buildAnyExtOrTrunc(DstTy &&Dst, UseArgTy &&Use) {
+    return buildAnyExtOrTrunc(getDestFromArg(Dst), getRegFromArg(Use));
+  }
+  MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = \p ExtOpc \p Op, \p Res = G_TRUNC \p
+  /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
+  /// \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, unsigned Res,
+                                      unsigned Op);
+
+  /// Build and insert an appropriate cast between two registers of equal size.
+  template <typename DstType, typename ArgType>
+  MachineInstrBuilder buildCast(DstType &&Res, ArgType &&Arg) {
+    return buildCast(getDestFromArg(Res), getRegFromArg(Arg));
+  }
+  MachineInstrBuilder buildCast(unsigned Dst, unsigned Src);
+
+  /// Build and insert G_BR \p Dest
+  ///
+  /// G_BR is an unconditional branch to \p Dest.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBr(MachineBasicBlock &Dest);
+
+  /// Build and insert G_BRCOND \p Tst, \p Dest
+  ///
+  /// G_BRCOND is a conditional branch to \p Dest.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tst must be a generic virtual register with scalar
+  ///      type. At the beginning of legalization, this will be a single
+  ///      bit (s1). Targets with interesting flags registers may change
+  ///      this. For a wider type, whether the branch is taken must only
+  ///      depend on bit 0 (for now).
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest);
+
+  /// Build and insert G_BRINDIRECT \p Tgt
+  ///
+  /// G_BRINDIRECT is an indirect branch to \p Tgt.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Tgt must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBrIndirect(unsigned Tgt);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value. \p
+  /// Val will be extended or truncated to the size of \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or pointer
+  ///      type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildConstant(unsigned Res, const ConstantInt &Val);
+
+  /// Build and insert \p Res = G_CONSTANT \p Val
+  ///
+  /// G_CONSTANT is an integer constant with the specified size and value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
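+  ///
+  /// A minimal sketch (illustrative; DstReg is assumed to be an s32
+  /// register):
+  /// \code
+  /// MIRBuilder.buildConstant(DstReg, 42);           // explicit register
+  /// auto C = MIRBuilder.buildConstant(LLT::scalar(32), 42); // fresh register
+  /// \endcode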
+  MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
+    return buildConstant(getDestFromArg(Res), Val);
+  }
+  /// Build and insert \p Res = G_FCONSTANT \p Val
+  ///
+  /// G_FCONSTANT is a floating-point constant with the specified size and
+  /// value.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, const ConstantFP &Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
+
+  template <typename DstType>
+  MachineInstrBuilder buildFConstant(DstType &&Res, double Val) {
+    return buildFConstant(getDestFromArg(Res), Val);
+  }
+  MachineInstrBuilder buildFConstant(unsigned Res, double Val);
+
+  /// Build and insert \p Res = COPY Op
+  ///
+  /// Register-to-register COPY sets \p Res to \p Op.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildCopy(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildCopy(DstType &&Res, SrcType &&Src) {
+    return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert `Res = G_LOAD Addr, MMO`.
+  ///
+  /// Loads the value stored at \p Addr. Puts the result in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
+                                MachineMemOperand &MMO);
+
+  /// Build and insert `G_STORE Val, Addr, MMO`.
+  ///
+  /// Stores the value \p Val to \p Addr.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Val must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
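+  ///
+  /// A sketch of building the memory operand and the store (MF is assumed to
+  /// be the current MachineFunction; the size and alignment are
+  /// illustrative):
+  /// \code
+  /// auto *MMO = MF.getMachineMemOperand(MachinePointerInfo(),
+  ///                                     MachineMemOperand::MOStore,
+  ///                                     /*Size=*/4, /*Alignment=*/4);
+  /// MIRBuilder.buildStore(ValReg, AddrReg, *MMO);
+  /// \endcode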
+  MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
+                                 MachineMemOperand &MMO);
+
+  /// Build and insert `Res = G_EXTRACT Src, Index`.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Src must be generic virtual registers.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
+
+  /// Build and insert \p Res = IMPLICIT_DEF.
+  template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
+    return buildUndef(getDestFromArg(Res));
+  }
+  MachineInstrBuilder buildUndef(unsigned Dst);
+
+  /// Build and insert instructions to put \p Ops together at the specified
+  /// \p Indices to form a larger register.
+  ///
+  /// If the types of the input registers are uniform and cover the entirety of
+  /// \p Res then a G_MERGE_VALUES will be produced. Otherwise an IMPLICIT_DEF
+  /// followed by a sequence of G_INSERT instructions will be produced.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The final element of the sequence must not extend past the end of the
+  ///      destination register.
+  /// \pre The bits defined by each Op (derived from index and scalar size) must
+  ///      not overlap.
+  /// \pre \p Indices must be in ascending order of bit position.
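+  ///
+  /// For example, packing two s32 registers Lo and Hi into an s64 register
+  /// Res could be sketched as (hypothetical registers):
+  /// \code
+  /// MIRBuilder.buildSequence(Res, {Lo, Hi}, {0, 32});
+  /// \endcode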
+  void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+                     ArrayRef<uint64_t> Indices);
+
+  /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
+  ///
+  /// G_MERGE_VALUES combines the input elements contiguously into a larger
+  /// register.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Res (and no more) must be covered by the input
+  ///      registers.
+  /// \pre The type of all \p Ops registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
+
+  /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
+  ///
+  /// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
+  /// registers.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre The entire register \p Op (and no more) must be covered by the
+  ///      \p Res registers.
+  /// \pre The type of all \p Res registers must be identical.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
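+  ///
+  /// For example, splitting an s64 register Wide into two s32 halves could
+  /// be sketched as (hypothetical registers):
+  /// \code
+  /// MIRBuilder.buildUnmerge({Lo, Hi}, Wide);
+  /// \endcode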
+  MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, unsigned Op);
+
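+  /// Build and insert \p Res = G_INSERT \p Src, \p Op, \p Index
+  ///
+  /// G_INSERT inserts \p Op into \p Src at bit offset \p Index, leaving the
+  /// remaining bits of \p Src unchanged, and writes the result to \p Res.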
+  MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
+                                  unsigned Op, unsigned Index);
+
+  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
+  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
+  /// result register definition unless \p Res is NoReg (== 0). The second
+  /// operand will be the intrinsic's ID.
+  ///
+  /// Callers are expected to add the required definitions and uses afterwards.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+                                     bool HasSideEffects);
+
+  /// Build and insert \p Res = G_FPTRUNC \p Op
+  ///
+  /// G_FPTRUNC converts a floating-point value into one with a smaller type.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildFPTrunc(DstType &&Res, SrcType &&Src) {
+    return buildFPTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+  MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
+
+  /// Build and insert \p Res = G_TRUNC \p Op
+  ///
+  /// G_TRUNC extracts the low bits of a type. For a vector type each element is
+  /// truncated independently before being packed into the destination.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar or vector type.
+  /// \pre \p Op must be a generic virtual register with scalar or vector type.
+  /// \pre \p Res must be smaller than \p Op
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
+  template <typename DstType, typename SrcType>
+  MachineInstrBuilder buildTrunc(DstType &&Res, SrcType &&Src) {
+    return buildTrunc(getDestFromArg(Res), getRegFromArg(Src));
+  }
+
+  /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res. If \p Res is a scalar,
+  ///      \p Op0 must be either a scalar or pointer.
+  /// \pre \p Pred must be an integer predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \pre \p Res must be a generic virtual register with scalar or
+  ///      vector type. Typically this starts as s1 or <N x s1>.
+  /// \pre \p Op0 and Op1 must be generic virtual registers with the
+  ///      same number of elements as \p Res (or scalar, if \p Res is
+  ///      scalar).
+  /// \pre \p Pred must be a floating-point predicate.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
+                                unsigned Res, unsigned Op0, unsigned Op1);
+
+  /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same type.
+  /// \pre \p Tst must be a generic virtual register with scalar, pointer or
+  ///      vector type. If vector then it must have the same number of
+  ///      elements as the other parameters.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
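+  ///
+  /// A compare-and-select sequence might be sketched as follows
+  /// (hypothetical registers; Cond has type s1):
+  /// \code
+  /// MIRBuilder.buildICmp(CmpInst::ICMP_ULT, Cond, A, B);
+  /// MIRBuilder.buildSelect(Min, Cond, A, B); // Min = unsigned min(A, B)
+  /// \endcode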
+  MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
+                                  unsigned Op0, unsigned Op1);
+
+  /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
+  /// \p Elt, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res and \p Val must be generic virtual registers
+  ///      with the same vector type.
+  /// \pre \p Elt and \p Idx must be a generic virtual register
+  ///      with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
+                                               unsigned Elt, unsigned Idx);
+
+  /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with scalar type.
+  /// \pre \p Val must be a generic virtual register with vector type.
+  /// \pre \p Idx must be a generic virtual register with scalar type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
+                                                unsigned Idx);
+
+  /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+  /// MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal, otherwise leaves it unchanged. Puts the original value from
+  /// \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register of scalar type.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+  ///      registers of the same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
+                                         unsigned CmpVal, unsigned NewVal,
+                                         MachineMemOperand &MMO);
+};
+
+} // End namespace llvm.
+#endif // LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
new file mode 100644
index 0000000..c53ae41
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -0,0 +1,665 @@
+//=- llvm/CodeGen/GlobalISel/RegBankSelect.h - Reg Bank Selector --*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file describes the interface of the MachineFunctionPass
+/// responsible for assigning the generic virtual registers to a register bank.
+///
+/// By default, the reg bank selector relies on local decisions to
+/// assign the register bank. In other words, it looks at one instruction
+/// at a time to decide where the operand of that instruction should live.
+///
+/// At higher optimization levels, we could imagine that the reg bank selector
+/// would use more global analysis and do crazier things like duplicating
+/// instructions and so on. This is future work.
+///
+/// For now, the pass uses a greedy algorithm to decide where the operand
+/// of an instruction should live. It asks the target which banks may be
+/// used for each operand of the instruction and what the cost is. Then,
+/// it chooses the solution which minimizes the cost of the instruction plus
+/// the cost of any move that may be needed to bring the values into the right
+/// register bank.
+/// In other words, the cost for an instruction on a register bank RegBank
+/// is: Cost of I on RegBank plus the sum of the cost for bringing the
+/// input operands from their current register bank to RegBank.
+/// Thus, the cost is given by the following formula:
+/// cost(I, RegBank) = cost(I.Opcode, RegBank) +
+///    sum(for each arg in I.arguments: costCrossCopy(arg.RegBank, RegBank))
+///
+/// E.g., let's say we are assigning the register bank for the instruction
+/// defining v2.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// v2 = G_ADD i32 v0, v1 <-- MI
+///
+/// The target may say it can generate G_ADD i32 on register bank A and B
+/// with a cost of 5 and 1, respectively.
+/// Then, let's say the cost of a cross-register-bank copy from A to B is 1.
+/// The reg bank selector would compare the following two costs:
+/// cost(MI, A_REGBANK) = cost(G_ADD, A_REGBANK) + cost(v0.RegBank, A_REGBANK) +
+///    cost(v1.RegBank, A_REGBANK)
+///                     = 5 + cost(A_REGBANK, A_REGBANK) + cost(A_REGBANK,
+///                                                             A_REGBANK)
+///                     = 5 + 0 + 0 = 5
+/// cost(MI, B_REGBANK) = cost(G_ADD, B_REGBANK) + cost(v0.RegBank, B_REGBANK) +
+///    cost(v1.RegBank, B_REGBANK)
+///                     = 1 + cost(A_REGBANK, B_REGBANK) + cost(A_REGBANK,
+///                                                             B_REGBANK)
+///                     = 1 + 1 + 1 = 3
+/// Therefore, in this specific example, the reg bank selector would choose
+/// bank B for MI.
+/// v0(A_REGBANK) = ...
+/// v1(A_REGBANK) = ...
+/// tmp0(B_REGBANK) = COPY v0
+/// tmp1(B_REGBANK) = COPY v1
+/// v2(B_REGBANK) = G_ADD i32 tmp0, tmp1
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace llvm {
+
+class BlockFrequency;
+class MachineBlockFrequencyInfo;
+class MachineBranchProbabilityInfo;
+class MachineOperand;
+class MachineRegisterInfo;
+class Pass;
+class raw_ostream;
+class TargetPassConfig;
+class TargetRegisterInfo;
+
+/// This pass implements the reg bank selector pass used in the GlobalISel
+/// pipeline. At the end of this pass, all register operands have been assigned
+/// to a register bank.
+class RegBankSelect : public MachineFunctionPass {
+public:
+  static char ID;
+
+  /// List of the modes supported by the RegBankSelect pass.
+  enum Mode {
+    /// Assign the register banks as fast as possible (default).
+    Fast,
+    /// Greedily minimize the cost of assigning register banks.
+    /// This should produce code of greater quality, but will
+    /// require more compile time.
+    Greedy
+  };
+
+  /// Abstract class used to represent an insertion point in a CFG.
+  /// This class records an insertion point and materializes it on
+  /// demand.
+  /// It allows reasoning about the frequency of this insertion point,
+  /// without having to logically materialize it (e.g., on an edge),
+  /// before we actually need to insert something.
+  class InsertPoint {
+  protected:
+    /// Tell if the insert point has already been materialized.
+    bool WasMaterialized = false;
+
+    /// Materialize the insertion point.
+    ///
+    /// If isSplit() is true, this involves actually splitting
+    /// the block or edge.
+    ///
+    /// \post getPointImpl() returns a valid iterator.
+    /// \post getInsertMBBImpl() returns a valid basic block.
+    /// \post isSplit() == false ; no more splitting should be required.
+    virtual void materialize() = 0;
+
+    /// Return the materialized insertion basic block.
+    /// Code will be inserted into that basic block.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock &getInsertMBBImpl() = 0;
+
+    /// Return the materialized insertion point.
+    /// Code will be inserted before that point.
+    ///
+    /// \pre ::materialize has been called.
+    virtual MachineBasicBlock::iterator getPointImpl() = 0;
+
+  public:
+    virtual ~InsertPoint() = default;
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+    /// the iterator to that point. I.e., no more splitting will
+    /// occur.
+    ///
+    /// \return The iterator that should be used with
+    /// MachineBasicBlock::insert. I.e., additional code happens
+    /// before that point.
+    MachineBasicBlock::iterator getPoint() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getPointImpl();
+    }
+
+    /// The first call to this method will cause the splitting to
+    /// happen if need be, then subsequent calls just return
+    /// the basic block that contains the insertion point.
+    /// I.e., no more splitting will occur.
+    ///
+    /// \return The basic block that should be used with
+    /// MachineBasicBlock::insert and ::getPoint. The new code should
+    /// happen before that point.
+    MachineBasicBlock &getInsertMBB() {
+      if (!WasMaterialized) {
+        WasMaterialized = true;
+        assert(canMaterialize() && "Impossible to materialize this point");
+        materialize();
+      }
+      // When we materialized the point we should have done the splitting.
+      assert(!isSplit() && "Wrong pre-condition");
+      return getInsertMBBImpl();
+    }
+
+    /// Insert \p MI just before ::getPoint().
+    MachineBasicBlock::iterator insert(MachineInstr &MI) {
+      return getInsertMBB().insert(getPoint(), &MI);
+    }
+
+    /// Does this point involve splitting an edge or block?
+    /// As soon as ::getPoint is called, and thus the point is
+    /// materialized, the point will not require splitting anymore,
+    /// i.e., this will return false.
+    virtual bool isSplit() const { return false; }
+
+    /// Frequency of the insertion point.
+    /// \p P is used to access the various analyses that will help to
+    /// get that information, like MachineBlockFrequencyInfo. If \p P
+    /// does not contain enough information to return the actual frequency,
+    /// this returns 1.
+    virtual uint64_t frequency(const Pass &P) const { return 1; }
+
+    /// Check whether this insertion point can be materialized.
+    /// As soon as ::getPoint is called, and thus the point is materialized,
+    /// calling this method does not make sense.
+    virtual bool canMaterialize() const { return false; }
+  };
+
+  /// Insertion point before or after an instruction.
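+  ///
+  /// For illustration, placing code right before an instruction could be
+  /// sketched as follows (MI and RepairMI are hypothetical MachineInstr
+  /// references):
+  /// \code
+  /// InstrInsertPoint Pt(MI, /*Before=*/true);
+  /// Pt.insert(RepairMI); // RepairMI now executes just before MI.
+  /// \endcode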
+  class InstrInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineInstr &Instr;
+
+    /// Whether the insertion point is before or after Instr.
+    bool Before;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      if (Before)
+        return Instr;
+      return Instr.getNextNode() ? *Instr.getNextNode()
+                                 : Instr.getParent()->end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override {
+      return *Instr.getParent();
+    }
+
+  public:
+    /// Create an insertion point before (\p Before=true) or after \p Instr.
+    InstrInsertPoint(MachineInstr &Instr, bool Before = true);
+
+    bool isSplit() const override;
+    uint64_t frequency(const Pass &P) const override;
+
+    // Worst case, we need to split the basic block, but that is still doable.
+    bool canMaterialize() const override { return true; }
+  };
+
+  /// Insertion point at the beginning or end of a basic block.
+  class MBBInsertPoint : public InsertPoint {
+  private:
+    /// Insertion point.
+    MachineBasicBlock &MBB;
+
+    /// Whether the insertion point is at the beginning or end of MBB.
+    bool Beginning;
+
+    void materialize() override { /*Nothing to do to materialize*/ }
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      return Beginning ? MBB.begin() : MBB.end();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return MBB; }
+
+  public:
+    MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
+        : InsertPoint(), MBB(MBB), Beginning(Beginning) {
+      // If we try to insert before phis, we should use the insertion
+      // points on the incoming edges.
+      assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
+             "Invalid beginning point");
+      // If we try to insert after the terminators, we should use the
+      // points on the outgoing edges.
+      assert((Beginning || MBB.getFirstTerminator() == MBB.end()) &&
+             "Invalid end point");
+    }
+
+    bool isSplit() const override { return false; }
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override { return true; }
+  };
+
+  /// Insertion point on an edge.
+  class EdgeInsertPoint : public InsertPoint {
+  private:
+    /// Source of the edge.
+    MachineBasicBlock &Src;
+
+    /// Destination of the edge.
+    /// After the materialization is done, this holds the basic block
+    /// that resulted from the splitting.
+    MachineBasicBlock *DstOrSplit;
+
+    /// P is used to update the analysis passes as applicable.
+    Pass &P;
+
+    void materialize() override;
+
+    MachineBasicBlock::iterator getPointImpl() override {
+      // DstOrSplit should be the Split block at this point.
+      // I.e., it should have one predecessor, Src, and one successor,
+      // the original Dst.
+      assert(DstOrSplit && DstOrSplit->isPredecessor(&Src) &&
+             DstOrSplit->pred_size() == 1 && DstOrSplit->succ_size() == 1 &&
+             "Did not split?!");
+      return DstOrSplit->begin();
+    }
+
+    MachineBasicBlock &getInsertMBBImpl() override { return *DstOrSplit; }
+
+  public:
+    EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
+        : InsertPoint(), Src(Src), DstOrSplit(&Dst), P(P) {}
+
+    bool isSplit() const override {
+      return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
+    }
+
+    uint64_t frequency(const Pass &P) const override;
+    bool canMaterialize() const override;
+  };
+
+  /// Struct used to represent the placement of a repairing point for
+  /// a given operand.
+  class RepairingPlacement {
+  public:
+    /// Define the kind of action this repairing needs.
+    enum RepairingKind {
+      /// Nothing to repair, just drop this action.
+      None,
+      /// Repairing code needs to happen before InsertPoints.
+      Insert,
+      /// (Re)assign the register bank of the operand.
+      Reassign,
+      /// Mark this repairing placement as impossible.
+      Impossible
+    };
+
+    /// \name Convenient types for a list of insertion points.
+    /// @{
+    using InsertionPoints = SmallVector<std::unique_ptr<InsertPoint>, 2>;
+    using insertpt_iterator = InsertionPoints::iterator;
+    using const_insertpt_iterator = InsertionPoints::const_iterator;
+    /// @}
+
+  private:
+    /// Kind of repairing.
+    RepairingKind Kind;
+    /// Index of the operand that will be repaired.
+    unsigned OpIdx;
+    /// Are all the insert points materializable?
+    bool CanMaterialize;
+    /// Do any of the insert points need splitting?
+    bool HasSplit = false;
+    /// Insertion point for the repair code.
+    /// The repairing code needs to happen just before these points.
+    InsertionPoints InsertPoints;
+    /// Some insertion points may need to update the liveness and such.
+    Pass &P;
+
+  public:
+    /// Create a repairing placement for the \p OpIdx-th operand of
+    /// \p MI. \p TRI is used to make some checks on the register aliases
+    /// if the machine operand is a physical register. \p P is used
+    /// to update liveness information and such when materializing the
+    /// points.
+    RepairingPlacement(MachineInstr &MI, unsigned OpIdx,
+                       const TargetRegisterInfo &TRI, Pass &P,
+                       RepairingKind Kind = RepairingKind::Insert);
+
+    /// \name Getters.
+    /// @{
+    RepairingKind getKind() const { return Kind; }
+    unsigned getOpIdx() const { return OpIdx; }
+    bool canMaterialize() const { return CanMaterialize; }
+    bool hasSplit() { return HasSplit; }
+    /// @}
+
+    /// \name Overloaded methods to add an insertion point.
+    /// @{
+    /// Add an MBBInsertPoint to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
+    /// Add an InstrInsertPoint to the list of InsertPoints.
+    void addInsertPoint(MachineInstr &MI, bool Before);
+    /// Add an EdgeInsertPoint (\p Src, \p Dst) to the list of InsertPoints.
+    void addInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst);
+    /// Add an InsertPoint to the list of insert points.
+    /// This method takes ownership of \p Point.
+    void addInsertPoint(InsertPoint &Point);
+    /// @}
+
+    /// \name Accessors related to the insertion points.
+    /// @{
+    insertpt_iterator begin() { return InsertPoints.begin(); }
+    insertpt_iterator end() { return InsertPoints.end(); }
+
+    const_insertpt_iterator begin() const { return InsertPoints.begin(); }
+    const_insertpt_iterator end() const { return InsertPoints.end(); }
+
+    unsigned getNumInsertPoints() const { return InsertPoints.size(); }
+    /// @}
+
+    /// Change the type of this repairing placement to \p NewKind.
+    /// It is not possible to switch a repairing placement to the
+    /// RepairingKind::Insert. There is no fundamental problem with
+    /// that, but no uses either, so we do not support it for now.
+    ///
+    /// \pre NewKind != RepairingKind::Insert
+    /// \post getKind() == NewKind
+    void switchTo(RepairingKind NewKind) {
+      assert(NewKind != Kind && "Already of the right Kind");
+      Kind = NewKind;
+      InsertPoints.clear();
+      CanMaterialize = NewKind != RepairingKind::Impossible;
+      HasSplit = false;
+      assert(NewKind != RepairingKind::Insert &&
+             "We would need more MI to switch to Insert");
+    }
+  };
+
+private:
+  /// Helper class used to represent the cost for mapping an instruction.
+  /// When mapping an instruction, we may introduce some repairing code.
+  /// In most cases, the repairing code is local to the instruction,
+  /// thus, we can omit the basic block frequency from the cost.
+  /// However, some alternatives may produce non-local cost, e.g., when
+  /// repairing a phi, and thus we then need to scale the local cost
+  /// to the non-local cost. This class does this for us.
+  /// \note: We could simply always scale the cost. The problem is that
+  /// we would then be more likely to saturate the cost and end
+  /// up with the same cost for actually different alternatives.
+  /// Another option would be to use APInt everywhere.
+  class MappingCost {
+  private:
+    /// Cost of the local instructions.
+    /// This cost is free of basic block frequency.
+    uint64_t LocalCost = 0;
+    /// Cost of the non-local instructions.
+    /// This cost should include the frequency of the related blocks.
+    uint64_t NonLocalCost = 0;
+    /// Frequency of the block where the local instructions live.
+    uint64_t LocalFreq;
+
+    MappingCost(uint64_t LocalCost, uint64_t NonLocalCost, uint64_t LocalFreq)
+        : LocalCost(LocalCost), NonLocalCost(NonLocalCost),
+          LocalFreq(LocalFreq) {}
+
+    /// Check if this cost is saturated.
+    bool isSaturated() const;
+
+  public:
+    /// Create a MappingCost assuming that most of the instructions
+    /// will occur in a basic block with \p LocalFreq frequency.
+    MappingCost(const BlockFrequency &LocalFreq);
+
+    /// Add \p Cost to the local cost.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addLocalCost(uint64_t Cost);
+
+    /// Add \p Cost to the non-local cost.
+    /// Non-local cost should reflect the frequency of their placement.
+    /// \return true if this cost is saturated, false otherwise.
+    bool addNonLocalCost(uint64_t Cost);
+
+    /// Saturate the cost to the maximal representable value.
+    void saturate();
+
+    /// Return an instance of MappingCost that represents an
+    /// impossible mapping.
+    static MappingCost ImpossibleCost();
+
+    /// Check if this is less than \p Cost.
+    bool operator<(const MappingCost &Cost) const;
+    /// Check if this is equal to \p Cost.
+    bool operator==(const MappingCost &Cost) const;
+    /// Check if this is not equal to \p Cost.
+    bool operator!=(const MappingCost &Cost) const { return !(*this == Cost); }
+    /// Check if this is greater than \p Cost.
+    bool operator>(const MappingCost &Cost) const {
+      return *this != Cost && Cost < *this;
+    }
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS;
+    void print(raw_ostream &OS) const;
+
+    /// Overload the stream operator for easy debug printing.
+    friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
+      Cost.print(OS);
+      return OS;
+    }
+  };
+
+  /// Interface to the target lowering info related
+  /// to register banks.
+  const RegisterBankInfo *RBI = nullptr;
+
+  /// MRI contains all the register class/bank information that this
+  /// pass uses and updates.
+  MachineRegisterInfo *MRI = nullptr;
+
+  /// Information on the register classes for the current function.
+  const TargetRegisterInfo *TRI = nullptr;
+
+  /// Get the frequency of blocks.
+  /// This is required for non-fast mode.
+  MachineBlockFrequencyInfo *MBFI = nullptr;
+
+  /// Get the frequency of the edges.
+  /// This is required for non-fast mode.
+  MachineBranchProbabilityInfo *MBPI = nullptr;
+
+  /// Current optimization remark emitter. Used to report failures.
+  std::unique_ptr<MachineOptimizationRemarkEmitter> MORE;
+
+  /// Helper class used for every code morphing.
+  MachineIRBuilder MIRBuilder;
+
+  /// Optimization mode of the pass.
+  Mode OptMode;
+
+  /// Current target configuration. Controls how the pass handles errors.
+  const TargetPassConfig *TPC;
+
+  /// Assign the register bank of each operand of \p MI.
+  /// \return True on success, false otherwise.
+  bool assignInstr(MachineInstr &MI);
+
+  /// Initialize the field members using \p MF.
+  void init(MachineFunction &MF);
+
+  /// Check if \p Reg is already assigned what is described by \p ValMapping.
+  /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
+  /// register bank.  I.e., no repairing is necessary to have the
+  /// assignment match.
+  bool assignmentMatch(unsigned Reg,
+                       const RegisterBankInfo::ValueMapping &ValMapping,
+                       bool &OnlyAssign) const;
+
+  /// Insert repairing code for \p Reg as specified by \p ValMapping.
+  /// The repairing placement is specified by \p RepairPt.
+  /// \p NewVRegs contains all the registers required to remap \p Reg.
+  /// In other words, the number of registers in NewVRegs must be equal
+  /// to ValMapping.BreakDown.size().
+  ///
+  /// The transformation could be sketched as:
+  /// \code
+  /// ... = op Reg
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// <NewRegs> = COPY or extract Reg
+  /// ... = op Reg
+  /// \endcode
+  ///
+  /// and
+  /// \code
+  /// Reg = op ...
+  /// \endcode
+  /// Becomes
+  /// \code
+  /// Reg = op ...
+  /// Reg = COPY or build_sequence <NewRegs>
+  /// \endcode
+  ///
+  /// \pre NewVRegs.size() == ValMapping.BreakDown.size()
+  ///
+  /// \note The caller is supposed to do the rewriting of op if need be.
+  /// I.e., Reg = op ... => <NewRegs> = NewOp ...
+  ///
+  /// \return True if the repairing worked, false otherwise.
+  bool repairReg(MachineOperand &MO,
+                 const RegisterBankInfo::ValueMapping &ValMapping,
+                 RegBankSelect::RepairingPlacement &RepairPt,
+                 const iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+                     &NewVRegs);
+
+  /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
+  /// The cost is free of basic block frequencies.
+  /// \pre MO.isReg()
+  /// \pre MO is assigned to a register bank.
+  /// \pre ValMapping is a valid mapping for MO.
+  uint64_t
+  getRepairCost(const MachineOperand &MO,
+                const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Find the best mapping for \p MI from \p PossibleMappings.
+  /// \return a reference to the best mapping in \p PossibleMappings.
+  const RegisterBankInfo::InstructionMapping &
+  findBestMapping(MachineInstr &MI,
+                  RegisterBankInfo::InstructionMappings &PossibleMappings,
+                  SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+  /// Compute the cost of mapping \p MI with \p InstrMapping and
+  /// compute the repairing placement for such a mapping in \p
+  /// RepairPts.
+  /// \p BestCost is used to specify when the cost becomes too high
+  /// and thus it is not worth computing the RepairPts. Moreover, if
+  /// \p BestCost == nullptr, the mapping cost is actually not
+  /// computed.
+  MappingCost
+  computeMapping(MachineInstr &MI,
+                 const RegisterBankInfo::InstructionMapping &InstrMapping,
+                 SmallVectorImpl<RepairingPlacement> &RepairPts,
+                 const MappingCost *BestCost = nullptr);
+
+  /// When \p RepairPt involves splitting to repair \p MO for the
+  /// given \p ValMapping, try to change the way we repair such that
+  /// the splitting is not required anymore.
+  ///
+  /// \pre \p RepairPt.hasSplit()
+  /// \pre \p MO == MO.getParent()->getOperand(\p RepairPt.getOpIdx())
+  /// \pre \p ValMapping is the mapping of \p MO for MO.getParent()
+  ///      that implied \p RepairPt.
+  void tryAvoidingSplit(RegBankSelect::RepairingPlacement &RepairPt,
+                        const MachineOperand &MO,
+                        const RegisterBankInfo::ValueMapping &ValMapping) const;
+
+  /// Apply \p InstrMapping to \p MI. \p RepairPts represents the different
+  /// repairing actions that need to happen for the mapping to be
+  /// applied.
+  /// \return True if the mapping was applied successfully, false otherwise.
+  bool applyMapping(MachineInstr &MI,
+                    const RegisterBankInfo::InstructionMapping &InstrMapping,
+                    SmallVectorImpl<RepairingPlacement> &RepairPts);
+
+public:
+  /// Create a RegBankSelect pass with the specified \p RunningMode.
+  RegBankSelect(Mode RunningMode = Fast);
+
+  StringRef getPassName() const override { return "RegBankSelect"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+        .set(MachineFunctionProperties::Property::IsSSA)
+        .set(MachineFunctionProperties::Property::Legalized);
+  }
+
+  MachineFunctionProperties getSetProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::RegBankSelected);
+  }
+
+  /// Walk through \p MF and assign a register bank to every virtual register
+  /// that is still mapped to nothing.
+  /// The target needs to provide a RegisterBankInfo and in particular
+  /// override RegisterBankInfo::getInstrMapping.
+  ///
+  /// Simplified algo:
+  /// \code
+  ///   RBI = MF.subtarget.getRegBankInfo()
+  ///   MIRBuilder.setMF(MF)
+  ///   for each bb in MF
+  ///     for each inst in bb
+  ///       MIRBuilder.setInstr(inst)
+  ///       MappingCosts = RBI.getMapping(inst);
+  ///       Idx = findIdxOfMinCost(MappingCosts)
+  ///       CurRegBank = MappingCosts[Idx].RegBank
+  ///       MRI.setRegBank(inst.getOperand(0).getReg(), CurRegBank)
+  ///       for each argument in inst
+  ///         if (CurRegBank != argument.RegBank)
+  ///           ArgReg = argument.getReg()
+  ///           Tmp = MRI.createNewVirtual(MRI.getSize(ArgReg), CurRegBank)
+  ///           MIRBuilder.buildInstr(COPY, Tmp, ArgReg)
+  ///           inst.getOperand(argument.getOperandNo()).setReg(Tmp)
+  /// \endcode
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
new file mode 100644
index 0000000..5d75842
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -0,0 +1,99 @@
+//==-- llvm/CodeGen/GlobalISel/RegisterBank.h - Register Bank ----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+#define LLVM_CODEGEN_GLOBALISEL_REGBANK_H
+
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+// Forward declarations.
+class RegisterBankInfo;
+class raw_ostream;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// This class implements the register bank concept.
+/// Two instances of RegisterBank must have different IDs.
+/// This property is enforced by the RegisterBankInfo class.
+class RegisterBank {
+private:
+  unsigned ID;
+  const char *Name;
+  unsigned Size;
+  BitVector ContainedRegClasses;
+
+  /// Sentinel value used to recognize a register bank that has not been
+  /// properly initialized yet.
+  static const unsigned InvalidID;
+
+  /// Only the RegisterBankInfo can initialize RegisterBank properly.
+  friend RegisterBankInfo;
+
+public:
+  RegisterBank(unsigned ID, const char *Name, unsigned Size,
+               const uint32_t *ContainedRegClasses, unsigned NumRegClasses);
+
+  /// Get the identifier of this register bank.
+  unsigned getID() const { return ID; }
+
+  /// Get a user friendly name of this register bank.
+  /// Should be used only for debugging purposes.
+  const char *getName() const { return Name; }
+
+  /// Get the maximal size in bits that fits in this register bank.
+  unsigned getSize() const { return Size; }
+
+  /// Check whether this instance is ready to be used.
+  bool isValid() const;
+
+  /// Check if this register bank is valid. In other words,
+  /// if it has been properly constructed.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+
+  /// Check whether this register bank covers \p RC.
+  /// In other words, check if this register bank fully covers
+  /// the registers that \p RC contains.
+  /// \pre isValid()
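+  ///
+  /// A minimal sketch (GPRBank and RC are hypothetical):
+  /// \code
+  /// bool FullyCovered = GPRBank.covers(RC); // RC fits entirely in GPRBank.
+  /// \endcode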
+  bool covers(const TargetRegisterClass &RC) const;
+
+  /// Check whether \p OtherRB is the same as this.
+  bool operator==(const RegisterBank &OtherRB) const;
+  bool operator!=(const RegisterBank &OtherRB) const {
+    return !this->operator==(OtherRB);
+  }
+
+  /// Dump the register bank on dbgs() stream.
+  /// The dump is verbose.
+  void dump(const TargetRegisterInfo *TRI = nullptr) const;
+
+  /// Print the register bank on OS.
+  /// If IsForDebug is false, then only the name of the register bank
+  /// is printed. Otherwise, all the fields are printed.
+  /// TRI is then used to print the name of the register classes that
+  /// this register bank covers.
+  void print(raw_ostream &OS, bool IsForDebug = false,
+             const TargetRegisterInfo *TRI = nullptr) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
+  RegBank.print(OS);
+  return OS;
+}
+} // End namespace llvm.
+
+#endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
new file mode 100644
index 0000000..82fd7ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -0,0 +1,757 @@
+//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the register bank info.
+/// This API is responsible for handling the register banks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <initializer_list>
+#include <memory>
+
+namespace llvm {
+
+class MachineInstr;
+class MachineRegisterInfo;
+class raw_ostream;
+class RegisterBank;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+
+/// Holds all the information related to register banks.
+class RegisterBankInfo {
+public:
+  /// Helper struct that represents how a value is partially mapped
+  /// into a register.
+  /// The StartIdx and Length represent what region of the original
+  /// value this partial mapping covers.
+  /// This can be represented as a mask of contiguous bits starting
+  /// at the StartIdx-th bit and spanning Length bits.
+  /// StartIdx is counted from the least significant bit.
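+  ///
+  /// For example, the low and high halves of a 64-bit value living in a
+  /// hypothetical 32-bit bank GPRBank would be described by:
+  /// \code
+  /// PartialMapping Lo(/*StartIdx=*/0, /*Length=*/32, GPRBank);
+  /// PartialMapping Hi(/*StartIdx=*/32, /*Length=*/32, GPRBank);
+  /// \endcode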
+  struct PartialMapping {
+    /// Number of bits at which this partial mapping starts in the
+    /// original value. The bits are counted from least significant
+    /// to most significant.
+    unsigned StartIdx;
+
+    /// Length of this mapping in bits. This is how many bits this
+    /// partial mapping covers in the original value:
+    /// from StartIdx to StartIdx + Length - 1.
+    unsigned Length;
+
+    /// Register bank where the partial value lives.
+    const RegisterBank *RegBank;
+
+    PartialMapping() = default;
+
+    /// Provide a shortcut for quickly building PartialMapping.
+    PartialMapping(unsigned StartIdx, unsigned Length,
+                   const RegisterBank &RegBank)
+        : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}
+
+    /// \return the index, in the original value, of the most
+    /// significant bit that this partial mapping covers.
+    unsigned getHighBitIdx() const { return StartIdx + Length - 1; }
+
+    /// Print this partial mapping on dbgs() stream.
+    void dump() const;
+
+    /// Print this partial mapping on \p OS;
+    void print(raw_ostream &OS) const;
+
+    /// Check that the Mask is compatible with the RegBank.
+    /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
+    /// there is no way this mapping is valid.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify() const;
+  };
+
+  /// Helper struct that represents how a value is mapped through
+  /// different register banks.
+  ///
+  /// \note So far we do not have any users of the complex mappings
+  /// (mappings with more than one partial mapping), but when we do,
+  /// we will need to duplicate the partial mappings.
+  /// The alternative would be to use an array of pointers to partial
+  /// mappings (i.e., PartialMapping **BreakDown) and duplicate the
+  /// pointers instead.
+  ///
+  /// E.g.,
+  /// Let's say we have a 32-bit add and a <2 x 32-bit> vadd. We
+  /// can expand the <2 x 32-bit> vadd into 2 x 32-bit adds.
+  ///
+  /// Currently the TableGen-like file would look like:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*2x32-bit add*/ {0, 32, GPR}, {0, 32, GPR}, // <-- Same entry 3x
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // PartialMapping duplicated.
+  ///
+  /// ValueMapping[] = {
+  ///   /*plain 32-bit add*/ {&PartialMapping[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// With the array of pointers, we would have:
+  /// \code
+  /// PartialMapping[] = {
+  /// /*32-bit add*/ {0, 32, GPR},
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+  /// }; // No more duplication.
+  ///
+  /// BreakDowns[] = {
+  /// /*AddBreakDown*/ &PartialMapping[0],
+  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[0],
+  /// /*VAddBreakDown*/ &PartialMapping[1]
+  /// }; // Addresses of PartialMapping duplicated (smaller).
+  ///
+  /// ValueMapping[] = {
+  ///   /*plain 32-bit add*/ {&BreakDowns[0], 1},
+  ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
+  ///   /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+  /// };
+  /// \endcode
+  ///
+  /// Given that a PartialMapping is actually small, the pointer-based
+  /// variant would actually degrade code size. Moreover, compile time
+  /// would suffer from the additional indirection.
+  /// If PartialMapping gets bigger, we may reconsider.
+  struct ValueMapping {
+    /// How the value is broken down between the different register banks.
+    const PartialMapping *BreakDown;
+
+    /// Number of partial mappings used to break down this value.
+    unsigned NumBreakDowns;
+
+    /// The default constructor creates an invalid (isValid() == false)
+    /// instance.
+    ValueMapping() : ValueMapping(nullptr, 0) {}
+
+    /// Initialize a ValueMapping with the given parameter.
+    /// \p BreakDown needs to have a lifetime at least as long
+    /// as this instance.
+    ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
+        : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}
+
+    /// Iterators through the PartialMappings.
+    const PartialMapping *begin() const { return BreakDown; }
+    const PartialMapping *end() const { return BreakDown + NumBreakDowns; }
+
+    /// Check if this ValueMapping is valid.
+    bool isValid() const { return BreakDown && NumBreakDowns; }
+
+    /// Verify that this mapping makes sense for a value of
+    /// \p MeaningfulBitWidth.
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(unsigned MeaningfulBitWidth) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Helper class that represents how the value of an instruction may be
+  /// mapped and what is the related cost of such mapping.
+  class InstructionMapping {
+    /// Identifier of the mapping.
+    /// This is used to communicate between the target and the optimizers
+    /// which mapping should be realized.
+    unsigned ID = InvalidMappingID;
+
+    /// Cost of this mapping.
+    unsigned Cost = 0;
+
+    /// Mapping of all the operands.
+    const ValueMapping *OperandsMapping;
+
+    /// Number of operands.
+    unsigned NumOperands = 0;
+
+    const ValueMapping &getOperandMapping(unsigned i) {
+      assert(i < getNumOperands() && "Out of bound operand");
+      return OperandsMapping[i];
+    }
+
+  public:
+    /// Constructor for the mapping of an instruction.
+    /// \p NumOperands must be equal to the number of operands of
+    /// the related instruction.
+    /// The rationale is that it is more efficient for the optimizers
+    /// to be able to assume that the mapping of the ith operand is
+    /// at index i.
+    ///
+    /// \pre ID != InvalidMappingID
+    InstructionMapping(unsigned ID, unsigned Cost,
+                       const ValueMapping *OperandsMapping,
+                       unsigned NumOperands)
+        : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
+          NumOperands(NumOperands) {
+      assert(getID() != InvalidMappingID &&
+             "Use the default constructor for invalid mapping");
+    }
+
+    /// Default constructor.
+    /// Use this constructor to express that the mapping is invalid.
+    InstructionMapping() = default;
+
+    /// Get the cost.
+    unsigned getCost() const { return Cost; }
+
+    /// Get the ID.
+    unsigned getID() const { return ID; }
+
+    /// Get the number of operands.
+    unsigned getNumOperands() const { return NumOperands; }
+
+    /// Get the value mapping of the ith operand.
+    /// \pre The mapping for the ith operand has been set.
+    /// \pre The ith operand is a register.
+    const ValueMapping &getOperandMapping(unsigned i) const {
+      const ValueMapping &ValMapping =
+          const_cast<InstructionMapping *>(this)->getOperandMapping(i);
+      return ValMapping;
+    }
+
+    /// Set the mapping for all the operands.
+    /// In other words, \p OpdsMapping should hold at least getNumOperands()
+    /// ValueMappings.
+    void setOperandsMapping(const ValueMapping *OpdsMapping) {
+      OperandsMapping = OpdsMapping;
+    }
+
+    /// Check whether this object is valid.
+    /// This is a lightweight check that catches obviously wrong instances.
+    bool isValid() const {
+      return getID() != InvalidMappingID && OperandsMapping;
+    }
+
+    /// Verify that this mapping makes sense for \p MI.
+    /// \pre \p MI must be connected to a MachineFunction.
+    ///
+    /// \note This method does not check anything when assertions are disabled.
+    ///
+    /// \return True if the check was successful.
+    bool verify(const MachineInstr &MI) const;
+
+    /// Print this on dbgs() stream.
+    void dump() const;
+
+    /// Print this on \p OS.
+    void print(raw_ostream &OS) const;
+  };
+
+  /// Convenient type to represent the alternatives for mapping an
+  /// instruction.
+  /// \todo When we move to TableGen this should be an array ref.
+  using InstructionMappings = SmallVector<const InstructionMapping *, 4>;
+
+  /// Helper class used to get/create the virtual registers that will be used
+  /// to replace the MachineOperand when applying a mapping.
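+  ///
+  /// A rough sketch of how a pass might drive it (RBI names a
+  /// RegisterBankInfo instance; all names here are illustrative):
+  /// \code
+  /// RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, MRI);
+  /// // Request fresh virtual registers for an operand being remapped,
+  /// // then let the target (or the default logic) rewrite MI.
+  /// OpdMapper.createVRegs(/*OpIdx*/ 0);
+  /// RBI.applyMapping(OpdMapper);
+  /// \endcode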
+  class OperandsMapper {
+    /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
+    /// OpIdx-th operand starts. -1 means we do not have such a mapping yet.
+    /// Note: We use a SmallVector to avoid heap allocation for most cases.
+    SmallVector<int, 8> OpToNewVRegIdx;
+
+    /// Hold the registers that will be used to map MI with InstrMapping.
+    SmallVector<unsigned, 8> NewVRegs;
+
+    /// Current MachineRegisterInfo, used to create new virtual registers.
+    MachineRegisterInfo &MRI;
+
+    /// Instruction being remapped.
+    MachineInstr &MI;
+
+    /// New mapping of the instruction.
+    const InstructionMapping &InstrMapping;
+
+    /// Constant value identifying that the index in OpToNewVRegIdx
+    /// for an operand has not been set yet.
+    static const int DontKnowIdx;
+
+    /// Get the range in NewVRegs to store all the partial
+    /// values for the \p OpIdx-th operand.
+    ///
+    /// \return The iterator range for the space created.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    iterator_range<SmallVectorImpl<unsigned>::iterator>
+    getVRegsMem(unsigned OpIdx);
+
+    /// Get the end iterator for a range starting at \p StartIdx and
+    /// spanning \p NumVal in NewVRegs.
+    /// \pre StartIdx + NumVal <= NewVRegs.size()
+    SmallVectorImpl<unsigned>::const_iterator
+    getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
+    SmallVectorImpl<unsigned>::iterator getNewVRegsEnd(unsigned StartIdx,
+                                                       unsigned NumVal);
+
+  public:
+    /// Create an OperandsMapper that will hold the information to apply \p
+    /// InstrMapping to \p MI.
+    /// \pre InstrMapping.verify(MI)
+    OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
+                   MachineRegisterInfo &MRI);
+
+    /// \name Getters.
+    /// @{
+    /// The MachineInstr being remapped.
+    MachineInstr &getMI() const { return MI; }
+
+    /// The final mapping of the instruction.
+    const InstructionMapping &getInstrMapping() const { return InstrMapping; }
+
+    /// The MachineRegisterInfo we used to realize the mapping.
+    MachineRegisterInfo &getMRI() const { return MRI; }
+    /// @}
+
+    /// Create as many new virtual registers as needed for the mapping of the \p
+    /// OpIdx-th operand.
+    /// The number of registers is determined by the number of breakdowns for
+    /// the related operand in the instruction mapping.
+    /// The type of the new registers is a plain scalar of the right size.
+    /// The proper type is expected to be set when the mapping is applied to
+    /// the instruction(s) that realize the mapping.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    ///
+    /// \post All the partial mapping of the \p OpIdx-th operand have been
+    /// assigned a new virtual register.
+    void createVRegs(unsigned OpIdx);
+
+    /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
+    /// the OpIdx-th operand to \p NewVReg.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre getInstrMapping().getOperandMapping(OpIdx).NumBreakDowns >
+    /// PartialMapIdx
+    /// \pre NewVReg != 0
+    ///
+    /// \post the \p PartialMapIdx-th register of the value mapping of the \p
+    /// OpIdx-th operand has been set.
+    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, unsigned NewVReg);
+
+    /// Get all the virtual registers required to map the \p OpIdx-th operand of
+    /// the instruction.
+    ///
+    /// This returns an empty range when createVRegs or setVRegs has not been
+    /// called.
+    /// The iterator may be invalidated by a call to setVRegs or createVRegs.
+    ///
+    /// When \p ForDebug is true, we will not check that the list of new virtual
+    /// registers does not contain uninitialized values.
+    ///
+    /// \pre getMI().getOperand(OpIdx).isReg()
+    /// \pre ForDebug || All partial mappings have been assigned a register
+    iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+    getVRegs(unsigned OpIdx, bool ForDebug = false) const;
+
+    /// Print this operands mapper on dbgs() stream.
+    void dump() const;
+
+    /// Print this operands mapper on \p OS stream.
+    void print(raw_ostream &OS, bool ForDebug = false) const;
+  };
+
+protected:
+  /// Hold the set of supported register banks.
+  RegisterBank **RegBanks;
+
+  /// Total number of register banks.
+  unsigned NumRegBanks;
+
+  /// Keep dynamically allocated PartialMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+      MapOfPartialMappings;
+
+  /// Keep dynamically allocated ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+      MapOfValueMappings;
+
+  /// Keep dynamically allocated array of ValueMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+      MapOfOperandsMappings;
+
+  /// Keep dynamically allocated InstructionMapping in a separate map.
+  /// This shouldn't be needed when everything gets TableGen'ed.
+  mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+      MapOfInstructionMappings;
+
+  /// Getting the minimal register class of a physreg is expensive.
+  /// Cache this information as we get it.
+  mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;
+
+  /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
+  /// RegisterBank instances.
+  RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
+
+  /// This constructor is meaningless.
+  /// It just provides a default constructor that can be used at link time
+  /// when GlobalISel is not built.
+  /// That way, targets can still inherit from this class without doing
+  /// crazy gymnastics to avoid link-time failures.
+  /// \note That works because the constructor is inlined.
+  RegisterBankInfo() {
+    llvm_unreachable("This constructor should not be executed");
+  }
+
+  /// Get the register bank identified by \p ID.
+  RegisterBank &getRegBank(unsigned ID) {
+    assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
+    return *RegBanks[ID];
+  }
+
+  /// Get the MinimalPhysRegClass for Reg.
+  /// \pre Reg is a physical register.
+  const TargetRegisterClass &
+  getMinimalPhysRegClass(unsigned Reg, const TargetRegisterInfo &TRI) const;
+
+  /// Try to get the mapping of \p MI.
+  /// See getInstrMapping for more details on what a mapping represents.
+  ///
+  /// Unlike getInstrMapping the returned InstructionMapping may be invalid
+  /// (isValid() == false).
+  /// This means that the target independent code is not smart enough
+  /// to get the mapping of \p MI and thus, the target has to provide the
+  /// information for \p MI.
+  ///
+  /// This implementation is able to get the mapping of:
+  /// - Target specific instructions by looking at the encoding constraints.
+  /// - Any instruction if all the register operands have already been assigned
+  ///   a register, a register class, or a register bank.
+  /// - Copies and phis if at least one of the operands has been assigned a
+  ///   register, a register class, or a register bank.
+  /// In other words, this method will likely fail to find a mapping for
+  /// any generic opcode that has not been lowered by target specific code.
+  const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
+
+  /// Get the uniquely generated PartialMapping for the
+  /// given arguments.
+  const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
+                                          const RegisterBank &RegBank) const;
+
+  /// \name Methods to get a uniquely generated ValueMapping.
+  /// @{
+
+  /// The most common ValueMapping consists of a single PartialMapping.
+  /// Provide a dedicated method for it.
+  const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
+                                      const RegisterBank &RegBank) const;
+
+  /// Get the ValueMapping for the given arguments.
+  const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
+                                      unsigned NumBreakDowns) const;
+  /// @}
+
+  /// \name Methods to get a uniquely generated array of ValueMapping.
+  /// @{
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements between \p Begin and \p End.
+  ///
+  /// Elements that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  ///
+  /// \pre The pointers on ValueMapping between \p Begin and \p End
+  /// must uniquely identify a ValueMapping. Otherwise, there is no
+  /// guarantee that the returned instance will be unique, i.e., another
+  /// OperandsMapping could have the same content.
+  template <typename Iterator>
+  const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// elements of \p OpdsMapping.
+  ///
+  /// Elements of \p OpdsMapping that are nullptr will be replaced by
+  /// invalid ValueMapping (ValueMapping::isValid == false).
+  const ValueMapping *getOperandsMapping(
+      const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;
+
+  /// Get the uniquely generated array of ValueMapping for the
+  /// given arguments.
+  ///
+  /// Arguments that are nullptr will be replaced by invalid
+  /// ValueMapping (ValueMapping::isValid == false).
+  const ValueMapping *getOperandsMapping(
+      std::initializer_list<const ValueMapping *> OpdsMapping) const;
+  /// @}
+
+  /// \name Methods to get a uniquely generated InstructionMapping.
+  /// @{
+
+private:
+  /// Method to get a uniquely generated InstructionMapping.
+  const InstructionMapping &
+  getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+                            unsigned Cost = 0,
+                            const ValueMapping *OperandsMapping = nullptr,
+                            unsigned NumOperands = 0) const;
+
+public:
+  /// Method to get a uniquely generated InstructionMapping.
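+  /// For example, a target override of getInstrMapping could build a
+  /// 3-operand, single-bank mapping roughly like this (GPRBank is a
+  /// stand-in for a target-defined register bank, not part of this API):
+  /// \code
+  /// const ValueMapping &VMap = getValueMapping(0, 32, GPRBank);
+  /// const ValueMapping *Ops = getOperandsMapping({&VMap, &VMap, &VMap});
+  /// return getInstructionMapping(DefaultMappingID, /*Cost*/ 1, Ops,
+  ///                              /*NumOperands*/ 3);
+  /// \endcode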
+  const InstructionMapping &
+  getInstructionMapping(unsigned ID, unsigned Cost,
+                        const ValueMapping *OperandsMapping,
+                        unsigned NumOperands) const {
+    return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+                                     OperandsMapping, NumOperands);
+  }
+
+  /// Method to get a uniquely generated invalid InstructionMapping.
+  const InstructionMapping &getInvalidInstructionMapping() const {
+    return getInstructionMappingImpl(/*IsInvalid*/ true);
+  }
+  /// @}
+
+  /// Get the register bank for the \p OpIdx-th operand of \p MI from
+  /// the encoding constraints, if any.
+  ///
+  /// \return A register bank that covers the register class of the
+  /// related encoding constraints or nullptr if \p MI did not provide
+  /// enough information to deduce it.
+  const RegisterBank *
+  getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
+                            const TargetInstrInfo &TII,
+                            const TargetRegisterInfo &TRI) const;
+
+  /// Helper method to apply something that is like the default mapping.
+  /// Basically, that means that \p OpdMapper.getMI() is left untouched
+  /// aside from the reassignment of the register operands that have been
+  /// remapped.
+  ///
+  /// The types of all the new registers that have been created by the
+  /// mapper are properly remapped to the types of the original registers
+  /// they replace. In other words, the semantics of the instruction do
+  /// not change, only the register banks.
+  ///
+  /// If the mapping of one of the operands spans several registers, this
+  /// method will abort as this is not like a default mapping anymore.
+  ///
+  /// \pre For OpIdx in [0, \p OpdMapper.getMI().getNumOperands())
+  ///        the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
+  static void applyDefaultMapping(const OperandsMapper &OpdMapper);
+
+  /// See ::applyMapping.
+  virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
+    llvm_unreachable("The target has to implement that part");
+  }
+
+public:
+  virtual ~RegisterBankInfo() = default;
+
+  /// Get the register bank identified by \p ID.
+  const RegisterBank &getRegBank(unsigned ID) const {
+    return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
+  }
+
+  /// Get the register bank of \p Reg.
+  /// If Reg has not been assigned a register, a register class,
+  /// or a register bank, then this returns nullptr.
+  ///
+  /// \pre Reg != 0 (NoRegister)
+  const RegisterBank *getRegBank(unsigned Reg, const MachineRegisterInfo &MRI,
+                                 const TargetRegisterInfo &TRI) const;
+
+  /// Get the total number of register banks.
+  unsigned getNumRegBanks() const { return NumRegBanks; }
+
+  /// Get a register bank that covers \p RC.
+  ///
+  /// \pre \p RC is a user-defined register class (as opposed to one
+  /// generated by TableGen).
+  ///
+  /// \note The mapping RC -> RegBank could be built while adding the
+  /// coverage for the register banks. However, we do not do it, because,
+  /// at least for now, we only need this information for register classes
+  /// that are used in the description of instructions. In other words,
+  /// there are just a handful of them and we do not want to waste space.
+  ///
+  /// \todo This should be TableGen'ed.
+  virtual const RegisterBank &
+  getRegBankFromRegClass(const TargetRegisterClass &RC) const {
+    llvm_unreachable("The target must override this method");
+  }
+
+  /// Get the cost of a copy from \p B to \p A, or put differently,
+  /// get the cost of A = COPY B. Since register banks may cover
+  /// different sizes, \p Size specifies what will be the size in bits
+  /// that will be copied around.
+  ///
+  /// \note Since this is a copy, both registers have the same size.
+  virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+                            unsigned Size) const {
+    // Optimistically assume that copies are coalesced. I.e., when
+    // they are on the same bank, they are free.
+    // Otherwise assume a non-zero cost of 1. The targets are supposed
+    // to override that properly anyway if they care.
+    return &A != &B;
+  }
+
+  /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
+  ///
+  /// \pre \p Reg is a virtual register that either has a bank or a class.
+  /// \returns The constrained register class, or nullptr if there is none.
+  /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
+  /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
+  /// purpose, including non-select passes of GlobalISel.
+  static const TargetRegisterClass *
+  constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC,
+                           MachineRegisterInfo &MRI);
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by target independent code.
+  /// Make sure not to use that identifier to avoid possible collision.
+  static const unsigned DefaultMappingID;
+
+  /// Identifier used when the related instruction mapping instance
+  /// is generated by the default constructor.
+  /// Make sure not to use that identifier.
+  static const unsigned InvalidMappingID;
+
+  /// Get the mapping of the different operands of \p MI
+  /// on the register bank.
+  /// This mapping should be the direct translation of \p MI.
+  /// In other words, when \p MI is mapped with the returned mapping,
+  /// only the register banks of the operands of \p MI need to be updated.
+  /// In particular, neither the opcode nor the type of \p MI needs to be
+  /// updated for this direct mapping.
+  ///
+  /// The target independent implementation gives a mapping based on
+  /// the register classes for the target specific opcode.
+  /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
+  /// Make sure you do not use that ID for the alternative mapping
+  /// for MI. See getInstrAlternativeMappings for the alternative
+  /// mappings.
+  ///
+  /// For instance, if \p MI is a vector add, the mapping should
+  /// not be a scalarization of the add.
+  ///
+  /// \post returnedVal.verify(MI).
+  ///
+  /// \note If returnedVal does not verify MI, this would probably mean
+  /// that the target does not support that instruction.
+  virtual const InstructionMapping &
+  getInstrMapping(const MachineInstr &MI) const;
+
+  /// Get the alternative mappings for \p MI.
+  /// Alternative here means different from what getInstrMapping returns.
+  virtual InstructionMappings
+  getInstrAlternativeMappings(const MachineInstr &MI) const;
+
+  /// Get the possible mapping for \p MI.
+  /// A mapping defines where the different operands may live and at what cost.
+  /// For instance, let us consider:
+  /// v0(16) = G_ADD <2 x i8> v1, v2
+  /// The possible mapping could be:
+  ///
+  /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
+  ///                              /*v2*/{(0xFFFF, VPR)}}
+  /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
+  ///                                /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
+  ///
+  /// \note The first alternative of the returned mapping should be the
+  /// direct translation of \p MI's current form.
+  ///
+  /// \post !returnedVal.empty().
+  InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;
+
+  /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
+  /// After this call \p OpdMapper.getMI() may not be valid anymore.
+  /// \p OpdMapper.getInstrMapping().getID() carries the information of
+  /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
+  /// by the various getInstrXXXMapping method.
+  ///
+  /// Therefore, getting the mapping and applying it should be kept in
+  /// sync.
+  void applyMapping(const OperandsMapper &OpdMapper) const {
+    // The only mapping we know how to handle is the default mapping.
+    if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
+      return applyDefaultMapping(OpdMapper);
+    // For other mapping, the target needs to do the right thing.
+    // If that means calling applyDefaultMapping, fine, but this
+    // must be explicitly stated.
+    applyMappingImpl(OpdMapper);
+  }
+
+  /// Get the size in bits of \p Reg.
+  /// Utility method to get the size of any register. Unlike
+  /// MachineRegisterInfo::getSize, the register does not need to be a
+  /// virtual register.
+  ///
+  /// \pre \p Reg != 0 (NoRegister).
+  unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
+                         const TargetRegisterInfo &TRI) const;
+
+  /// Check that the information held by this instance makes sense for the
+  /// given \p TRI.
+  ///
+  /// \note This method does not check anything when assertions are disabled.
+  ///
+  /// \return True if the check was successful.
+  bool verify(const TargetRegisterInfo &TRI) const;
+};
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::PartialMapping &PartMapping) {
+  PartMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
+  ValMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS,
+           const RegisterBankInfo::InstructionMapping &InstrMapping) {
+  InstrMapping.print(OS);
+  return OS;
+}
+
+inline raw_ostream &
+operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
+  OpdMapper.print(OS, /*ForDebug*/ false);
+  return OS;
+}
+
+/// Hashing function for PartialMapping.
+/// It is required for the hashing of ValueMapping.
+hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
new file mode 100644
index 0000000..7b22e34
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
@@ -0,0 +1,34 @@
+//===- llvm/CodeGen/GlobalISel/Types.h - Types used by GISel ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes high level types that are used by several passes or
+/// APIs involved in the GlobalISel pipeline.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_TYPES_H
+#define LLVM_CODEGEN_GLOBALISEL_TYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+class Value;
+
+/// Map a value to a virtual register.
+/// For now, we chose to map aggregate types to a single virtual
+/// register. This might be revisited if it turns out to be inefficient.
+/// PR26161 tracks that.
+/// Note: We need to expose this type to the target hooks for things like
+/// ABI lowering that would be used during IRTranslation.
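+///
+/// A minimal usage sketch (names are illustrative, not part of this API):
+/// \code
+/// ValueToVReg VMap;
+/// VMap[&IRValue] = VReg;                 // remember the vreg chosen for IRValue
+/// unsigned Reg = VMap.lookup(&IRValue);  // later, retrieve it
+/// \endcode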
+using ValueToVReg = DenseMap<const Value *, unsigned>;
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_TYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
new file mode 100644
index 0000000..837035f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -0,0 +1,106 @@
+//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API of helper functions used throughout the
+/// GlobalISel pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
+#define LLVM_CODEGEN_GLOBALISEL_UTILS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class MachineOperand;
+class MachineOptimizationRemarkEmitter;
+class MachineOptimizationRemarkMissed;
+class MachineRegisterInfo;
+class MCInstrDesc;
+class RegisterBankInfo;
+class TargetInstrInfo;
+class TargetPassConfig;
+class TargetRegisterInfo;
+class TargetRegisterClass;
+class Twine;
+class ConstantFP;
+class APFloat;
+
+/// Try to constrain Reg to the specified register class. If this fails,
+/// create a new virtual register in the correct class and insert a COPY before
+/// \p InsertPt. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
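+///
+/// A sketch of how a selector might use it (the AArch64 register class
+/// below is purely an example; any TargetRegisterClass works):
+/// \code
+/// unsigned Src = constrainRegToClass(MRI, TII, RBI, I,
+///                                    I.getOperand(1).getReg(),
+///                                    AArch64::GPR32RegClass);
+/// \endcode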
+unsigned constrainRegToClass(MachineRegisterInfo &MRI,
+                             const TargetInstrInfo &TII,
+                             const RegisterBankInfo &RBI,
+                             MachineInstr &InsertPt, unsigned Reg,
+                             const TargetRegisterClass &RegClass);
+
+/// Try to constrain Reg so that it is usable by argument OpIdx of the
+/// provided MCInstrDesc \p II. If this fails, create a new virtual
+/// register in the correct class and insert a COPY before \p InsertPt.
+/// This is equivalent to constrainRegToClass() with RegClass obtained from the
+/// MCInstrDesc. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainOperandRegClass(const MachineFunction &MF,
+                                  const TargetRegisterInfo &TRI,
+                                  MachineRegisterInfo &MRI,
+                                  const TargetInstrInfo &TII,
+                                  const RegisterBankInfo &RBI,
+                                  MachineInstr &InsertPt, const MCInstrDesc &II,
+                                  const MachineOperand &RegMO, unsigned OpIdx);
+
+/// Mutate the newly-selected instruction \p I to constrain its (possibly
+/// generic) virtual register operands to the instruction's register class.
+/// This could involve inserting COPYs before (for uses) or after (for defs).
+/// This requires the number of operands to match the instruction description.
+/// \returns whether operand regclass constraining succeeded.
+///
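+/// A typical call from a target's select() hook might look like this
+/// (sketch; I names the just-selected MachineInstr):
+/// \code
+/// if (!constrainSelectedInstRegOperands(I, TII, TRI, RBI))
+///   return false; // Could not constrain all operands; selection fails.
+/// \endcode
+///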
+// FIXME: Not all instructions have the same number of operands. We should
+// probably expose a constrain helper per operand and let the target selector
+// constrain individual registers, like fast-isel.
+bool constrainSelectedInstRegOperands(MachineInstr &I,
+                                      const TargetInstrInfo &TII,
+                                      const TargetRegisterInfo &TRI,
+                                      const RegisterBankInfo &RBI);
+/// Check whether an instruction \p MI is dead: it only defines dead virtual
+/// registers, and doesn't have other side effects.
+bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
+
+/// Report an ISel error as a missed optimization remark to the LLVMContext's
+/// diagnostic stream.  Set the FailedISel MachineFunction property.
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        MachineOptimizationRemarkMissed &R);
+
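+/// Convenience overload of the above that builds the missed-optimization
+/// remark from \p PassName, \p Msg and \p MI before reporting it.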
+void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        const char *PassName, StringRef Msg,
+                        const MachineInstr &MI);
+
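+/// If \p VReg holds a known integer constant (e.g., it is defined by a
+/// G_CONSTANT), return its value; otherwise return None. A usage sketch:
+/// \code
+/// if (Optional<int64_t> Cst = getConstantVRegVal(Reg, MRI))
+///   IsZero = (*Cst == 0); // Reg is known to hold the constant *Cst.
+/// \endcode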
+Optional<int64_t> getConstantVRegVal(unsigned VReg,
+                                     const MachineRegisterInfo &MRI);
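+/// Return the ConstantFP defining \p VReg (e.g., via a G_FCONSTANT), or
+/// nullptr if no such constant can be identified.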
+const ConstantFP* getConstantFPVRegVal(unsigned VReg,
+                                       const MachineRegisterInfo &MRI);
+
+/// See if \p Reg is defined by a single-def instruction whose opcode
+/// is \p Opcode. Also try to do trivial folding if it's a COPY with
+/// the same types. Returns null otherwise.
+MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+                           const MachineRegisterInfo &MRI);
+
+/// Returns an APFloat from Val converted to the appropriate size.
+APFloat getAPFloatFromSize(double Val, unsigned Size);
+} // end namespace llvm
+#endif // LLVM_CODEGEN_GLOBALISEL_UTILS_H