Update clang to r339409.

Change-Id: I800772d2d838223be1f6b40d490c4591b937fca2
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 8d91cc4..32980ce 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -123,7 +123,7 @@
   }
 
   template <typename FuncInfoTy>
-  void setArgFlags(ArgInfo &Arg, unsigned OpNum, const DataLayout &DL,
+  void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                    const FuncInfoTy &FuncInfo) const;
 
   /// Invoke Handler::assignArg on each of the given \p Args and then use
@@ -131,19 +131,19 @@
   ///
   /// \return True if everything has succeeded, false otherwise.
   bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
-                         ValueHandler &Callback) const;
+                         ValueHandler &Handler) const;
 
 public:
   CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
   virtual ~CallLowering() = default;
 
   /// This hook must be implemented to lower outgoing return values, described
-  /// by \p Val, into the specified virtual register \p VReg.
+  /// by \p Val, into the specified virtual registers \p VRegs.
   /// This hook is used by GlobalISel.
   ///
   /// \return True if the lowering succeeds, false otherwise.
-  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder,
-                           const Value *Val, unsigned VReg) const {
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+                           ArrayRef<unsigned> VRegs) const {
     return false;
   }
 
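The reworked lowerReturn hook receives every component register of the return value at once instead of a single vreg. A minimal sketch of a target override under the new signature; MyTargetCallLowering, MyTarget::RET, and MyTarget::R0 are placeholder names, and a real target would split aggregate returns across all of VRegs:

class MyTargetCallLowering : public CallLowering {
public:
  MyTargetCallLowering(const TargetLowering *TLI) : CallLowering(TLI) {}

  bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                   ArrayRef<unsigned> VRegs) const override {
    auto MIB = MIRBuilder.buildInstrNoInsert(MyTarget::RET);
    if (Val) {
      // Only the single-register case is handled in this sketch;
      // aggregates arrive as multiple entries in VRegs.
      if (VRegs.size() != 1)
        return false;
      MIRBuilder.buildCopy(MyTarget::R0, VRegs[0]);
      MIB.addUse(MyTarget::R0, RegState::Implicit);
    }
    MIRBuilder.insertInstr(MIB);
    return true;
  }
};
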
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
new file mode 100644
index 0000000..8d61f9a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -0,0 +1,134 @@
+//===-- llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h  --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements a version of MachineIRBuilder which does trivial
+/// constant folding.
+//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+
+namespace llvm {
+
+static Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
+                                         const unsigned Op2,
+                                         const MachineRegisterInfo &MRI) {
+  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
+  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
+  if (MaybeOp1Cst && MaybeOp2Cst) {
+    LLT Ty = MRI.getType(Op1);
+    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
+    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
+    switch (Opcode) {
+    default:
+      break;
+    case TargetOpcode::G_ADD:
+      return C1 + C2;
+    case TargetOpcode::G_AND:
+      return C1 & C2;
+    case TargetOpcode::G_ASHR:
+      return C1.ashr(C2);
+    case TargetOpcode::G_LSHR:
+      return C1.lshr(C2);
+    case TargetOpcode::G_MUL:
+      return C1 * C2;
+    case TargetOpcode::G_OR:
+      return C1 | C2;
+    case TargetOpcode::G_SHL:
+      return C1 << C2;
+    case TargetOpcode::G_SUB:
+      return C1 - C2;
+    case TargetOpcode::G_XOR:
+      return C1 ^ C2;
+    case TargetOpcode::G_UDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.udiv(C2);
+    case TargetOpcode::G_SDIV:
+      if (!C2.getBoolValue())
+        break;
+      return C1.sdiv(C2);
+    case TargetOpcode::G_UREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.urem(C2);
+    case TargetOpcode::G_SREM:
+      if (!C2.getBoolValue())
+        break;
+      return C1.srem(C2);
+    }
+  }
+  return None;
+}
+
+/// An MIRBuilder which does trivial constant folding of binary ops.
+/// Calls to buildInstr will also try to constant fold binary ops.
+class ConstantFoldingMIRBuilder
+    : public FoldableInstructionsBuilder<ConstantFoldingMIRBuilder> {
+public:
+  // Pull in base class constructors.
+  using FoldableInstructionsBuilder<
+      ConstantFoldingMIRBuilder>::FoldableInstructionsBuilder;
+  // Unhide buildInstr
+  using FoldableInstructionsBuilder<ConstantFoldingMIRBuilder>::buildInstr;
+
+  // Implement the buildBinaryOp hook required by FoldableInstructionsBuilder;
+  // it tries to constant fold before emitting the instruction.
+  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
+                                    unsigned Src0, unsigned Src1) {
+    validateBinaryOp(Dst, Src0, Src1);
+    auto MaybeCst = ConstantFoldBinOp(Opcode, Src0, Src1, getMF().getRegInfo());
+    if (MaybeCst)
+      return buildConstant(Dst, MaybeCst->getSExtValue());
+    return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
+  }
+
+  template <typename DstTy, typename UseArg1Ty, typename UseArg2Ty>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty, UseArg1Ty &&Arg1,
+                                 UseArg2Ty &&Arg2) {
+    unsigned Dst = getDestFromArg(Ty);
+    return buildInstr(Opc, Dst, getRegFromArg(std::forward<UseArg1Ty>(Arg1)),
+                      getRegFromArg(std::forward<UseArg2Ty>(Arg2)));
+  }
+
+  // Try to provide an overload for buildInstr for binary ops in order to
+  // constant fold.
+  MachineInstrBuilder buildInstr(unsigned Opc, unsigned Dst, unsigned Src0,
+                                 unsigned Src1) {
+    switch (Opc) {
+    default:
+      break;
+    case TargetOpcode::G_ADD:
+    case TargetOpcode::G_AND:
+    case TargetOpcode::G_ASHR:
+    case TargetOpcode::G_LSHR:
+    case TargetOpcode::G_MUL:
+    case TargetOpcode::G_OR:
+    case TargetOpcode::G_SHL:
+    case TargetOpcode::G_SUB:
+    case TargetOpcode::G_XOR:
+    case TargetOpcode::G_UDIV:
+    case TargetOpcode::G_SDIV:
+    case TargetOpcode::G_UREM:
+    case TargetOpcode::G_SREM: {
+      return buildBinaryOp(Opc, Dst, Src0, Src1);
+    }
+    }
+    return buildInstr(Opc).addDef(Dst).addUse(Src0).addUse(Src1);
+  }
+
+  // Fallback implementation of buildInstr.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+                                 UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
+};
+} // namespace llvm
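
A usage sketch of the new builder: when every source of a foldable binary op is a G_CONSTANT, buildInstr emits a single folded G_CONSTANT instead of the op itself. The setup assumes the builder is initialized the same way as a plain MachineIRBuilder:

void foldAddExample(MachineFunction &MF, MachineBasicBlock &MBB) {
  ConstantFoldingMIRBuilder Builder;
  Builder.setMF(MF);
  Builder.setInsertPt(MBB, MBB.end());

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const LLT S32 = LLT::scalar(32);
  unsigned C2 = MRI.createGenericVirtualRegister(S32);
  unsigned C3 = MRI.createGenericVirtualRegister(S32);
  unsigned Dst = MRI.createGenericVirtualRegister(S32);
  Builder.buildConstant(C2, 2);
  Builder.buildConstant(C3, 3);

  // Both sources are known constants, so this emits G_CONSTANT i32 5
  // rather than a G_ADD.
  Builder.buildInstr(TargetOpcode::G_ADD, Dst, C2, C3);
}
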
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 7061c01..f355396 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -24,6 +24,7 @@
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Types.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/Allocator.h"
 #include "llvm/IR/Intrinsics.h"
 #include <memory>
 #include <utility>
@@ -63,9 +64,83 @@
   /// Interface used to lower everything related to calls.
   const CallLowering *CLI;
 
-  /// Mapping of the values of the current LLVM IR function
-  /// to the related virtual registers.
-  ValueToVReg ValToVReg;
+  /// This class contains the mapping from Values to vreg-related data.
+  class ValueToVRegInfo {
+  public:
+    ValueToVRegInfo() = default;
+
+    using VRegListT = SmallVector<unsigned, 1>;
+    using OffsetListT = SmallVector<uint64_t, 1>;
+
+    using const_vreg_iterator =
+        DenseMap<const Value *, VRegListT *>::const_iterator;
+    using const_offset_iterator =
+        DenseMap<const Value *, OffsetListT *>::const_iterator;
+
+    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
+
+    VRegListT *getVRegs(const Value &V) {
+      auto It = ValToVRegs.find(&V);
+      if (It != ValToVRegs.end())
+        return It->second;
+
+      return insertVRegs(V);
+    }
+
+    OffsetListT *getOffsets(const Value &V) {
+      auto It = TypeToOffsets.find(V.getType());
+      if (It != TypeToOffsets.end())
+        return It->second;
+
+      return insertOffsets(V);
+    }
+
+    const_vreg_iterator findVRegs(const Value &V) const {
+      return ValToVRegs.find(&V);
+    }
+
+    bool contains(const Value &V) const {
+      return ValToVRegs.find(&V) != ValToVRegs.end();
+    }
+
+    void reset() {
+      ValToVRegs.clear();
+      TypeToOffsets.clear();
+      VRegAlloc.DestroyAll();
+      OffsetAlloc.DestroyAll();
+    }
+
+  private:
+    VRegListT *insertVRegs(const Value &V) {
+      assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
+
+      // We placement new using our fast allocator since we never try to free
+      // the vectors until translation is finished.
+      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
+      ValToVRegs[&V] = VRegList;
+      return VRegList;
+    }
+
+    OffsetListT *insertOffsets(const Value &V) {
+      assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
+             "Type already exists");
+
+      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
+      TypeToOffsets[V.getType()] = OffsetList;
+      return OffsetList;
+    }
+    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
+    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
+
+    // We store pointers to vectors here since references may be invalidated
+    // while we hold them if we stored the vectors directly.
+    DenseMap<const Value *, VRegListT*> ValToVRegs;
+    DenseMap<const Type *, OffsetListT*> TypeToOffsets;
+  };
+
+  /// Mapping of the values of the current LLVM IR function to the related
+  /// virtual registers and offsets.
+  ValueToVRegInfo VMap;
 
   // N.b. it's not completely obvious that this will be sufficient for every
   // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
@@ -82,7 +157,8 @@
 
   // List of stubbed PHI instructions, for values and basic blocks to be filled
   // in once all MachineBasicBlocks have been created.
-  SmallVector<std::pair<const PHINode *, MachineInstr *>, 4> PendingPHIs;
+  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
+      PendingPHIs;
 
   /// Record of what frame index has been allocated to specified allocas for
   /// this function.
@@ -99,7 +175,7 @@
   /// The general algorithm is:
   /// 1. Look for a virtual register for each operand or
   ///    create one.
-  /// 2 Update the ValToVReg accordingly.
+  /// 2. Update the VMap accordingly.
   /// 2.alt. For constant arguments, if they are compile time constants,
   ///   produce an immediate in the right operand and do not touch
   ///   ValToReg. Actually we will go with a virtual register for each
@@ -134,7 +210,7 @@
 
   /// Translate an LLVM string intrinsic (memcpy, memset, ...).
   bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
-                        unsigned Intrinsic);
+                        unsigned ID);
 
   void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
 
@@ -146,6 +222,19 @@
 
   bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
 
+  // FIXME: temporary function to expose previous interface to call lowering
+  // until it is refactored.
+  /// Combines all component registers of \p V into a single scalar with size
+  /// "max(Offsets) + last size".
+  unsigned packRegs(const Value &V, MachineIRBuilder &MIRBuilder);
+
+  void unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder);
+
+  /// Returns true if the value should be split into multiple LLTs.
+  /// If \p Offsets is given then the split type's offsets will be stored in it.
+  bool valueIsSplit(const Value &V,
+                    SmallVectorImpl<uint64_t> *Offsets = nullptr);
+
   /// Translate call instruction.
   /// \pre \p U is a call instruction.
   bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
@@ -310,6 +399,9 @@
 
   bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
 
+  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
+  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
+
   // Stubs to keep the compiler happy while we implement the rest of the
   // translation.
   bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
@@ -327,14 +419,8 @@
   bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
     return false;
   }
-  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder) {
-    return false;
-  }
-  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder) {
-    return false;
-  }
   bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
-    return false;
+    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
   }
   bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
     return false;
@@ -381,9 +467,24 @@
   // * Clear the different maps.
   void finalizeFunction();
 
-  /// Get the VReg that represents \p Val.
-  /// If such VReg does not exist, it is created.
-  unsigned getOrCreateVReg(const Value &Val);
+  /// Get the VRegs that represent \p Val.
+  /// Non-aggregate types have just one corresponding VReg and the list can be
+  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
+  /// not exist, they are created.
+  ArrayRef<unsigned> getOrCreateVRegs(const Value &Val);
+
+  unsigned getOrCreateVReg(const Value &Val) {
+    auto Regs = getOrCreateVRegs(Val);
+    if (Regs.empty())
+      return 0;
+    assert(Regs.size() == 1 &&
+           "attempt to get single VReg for aggregate or void");
+    return Regs[0];
+  }
+
+  /// Allocate some vregs and offsets in the VMap. Then populate just the
+  /// offsets while leaving the vregs empty.
+  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
 
   /// Get the frame index that represents \p Val.
   /// If such VReg does not exist, it is created.
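
The new getOrCreateVRegs contract flattens aggregates into one vreg per leaf component. A sketch from inside the translator; StructVal (of IR type {i32, i64}) and ScalarVal are placeholder values:

// Aggregates flatten to one vreg per leaf component:
ArrayRef<unsigned> Regs = getOrCreateVRegs(StructVal); // two vregs: s32, s64
SmallVector<uint64_t, 4> Offsets;
if (valueIsSplit(StructVal, &Offsets)) {
  // Offsets[i] is the bit offset of Regs[i] within the aggregate,
  // e.g. {0, 64} under natural struct layout.
}
// Scalars still go through the single-register wrapper, which asserts
// that exactly one vreg came back:
unsigned Reg = getOrCreateVReg(ScalarVal);
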
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index eacd135..471def7 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -20,6 +20,7 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/CodeGenCoverage.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
 #include <bitset>
 #include <cstddef>
 #include <cstdint>
@@ -31,7 +32,6 @@
 
 class APInt;
 class APFloat;
-class LLT;
 class MachineInstr;
 class MachineInstrBuilder;
 class MachineFunction;
@@ -81,6 +81,23 @@
   ///        failed match.
   GIM_Try,
 
+  /// Switch over the opcode on the specified instruction
+  /// - InsnID - Instruction ID
+  /// - LowerBound - numerically minimum opcode supported
+  /// - UpperBound - numerically maximum + 1 opcode supported
+  /// - Default - failure jump target
+  /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+  GIM_SwitchOpcode,
+
+  /// Switch over the LLT on the specified instruction operand
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - LowerBound - numerically minimum Type ID supported
+  /// - UpperBound - numerically maximum + 1 Type ID supported
+  /// - Default - failure jump target
+  /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
+  GIM_SwitchType,
+
   /// Record the specified instruction
   /// - NewInsnID - Instruction ID to define
   /// - InsnID - Instruction ID
@@ -117,6 +134,23 @@
   GIM_CheckAtomicOrdering,
   GIM_CheckAtomicOrderingOrStrongerThan,
   GIM_CheckAtomicOrderingWeakerThan,
+  /// Check the size of the memory access for the given machine memory operand.
+  /// - InsnID - Instruction ID
+  /// - MMOIdx - MMO index
+  /// - Size - The size in bytes of the memory access
+  GIM_CheckMemorySizeEqualTo,
+  /// Check the size of the memory access for the given machine memory operand
+  /// against the size of an operand.
+  /// - InsnID - Instruction ID
+  /// - MMOIdx - MMO index
+  /// - OpIdx - The operand index to compare the MMO against
+  GIM_CheckMemorySizeEqualToLLT,
+  GIM_CheckMemorySizeLessThanLLT,
+  GIM_CheckMemorySizeGreaterThanLLT,
+  /// Check a generic C++ instruction predicate
+  /// - InsnID - Instruction ID
+  /// - PredicateID - The ID of the predicate function to call
+  GIM_CheckCxxInsnPredicate,
 
   /// Check the type for the specified operand
   /// - InsnID - Instruction ID
@@ -133,12 +167,14 @@
   /// - OpIdx - Operand index
   /// - Expected register bank (specified as a register class)
   GIM_CheckRegBankForClass,
+
   /// Check the operand matches a complex predicate
   /// - InsnID - Instruction ID
   /// - OpIdx - Operand index
   /// - RendererID - The renderer to hold the result
   /// - Complex predicate ID
   GIM_CheckComplexPattern,
+
   /// Check the operand is a specific integer
   /// - InsnID - Instruction ID
   /// - OpIdx - Operand index
@@ -155,6 +191,7 @@
   /// - OpIdx - Operand index
   /// - Expected Intrinsic ID
   GIM_CheckIntrinsicID,
+
   /// Check the specified operand is an MBB
   /// - InsnID - Instruction ID
   /// - OpIdx - Operand index
@@ -183,6 +220,7 @@
   /// - OldInsnID - Instruction ID to mutate
   /// - NewOpcode - The new opcode to use
   GIR_MutateOpcode,
+
   /// Build a new instruction
   /// - InsnID - Instruction ID to define
   /// - Opcode - The new opcode to use
@@ -193,6 +231,7 @@
   /// - OldInsnID - Instruction ID to copy from
   /// - OpIdx - The operand to copy
   GIR_Copy,
+
   /// Copy an operand to the specified instruction or add a zero register if the
   /// operand is a zero immediate.
   /// - NewInsnID - Instruction ID to modify
@@ -206,6 +245,7 @@
   /// - OpIdx - The operand to copy
   /// - SubRegIdx - The subregister to copy
   GIR_CopySubReg,
+
   /// Add an implicit register def to the specified instruction
   /// - InsnID - Instruction ID to modify
   /// - RegNum - The register to add
@@ -218,10 +258,13 @@
   /// - InsnID - Instruction ID to modify
   /// - RegNum - The register to add
   GIR_AddRegister,
+
   /// Add a temporary register to the specified instruction
   /// - InsnID - Instruction ID to modify
   /// - TempRegID - The temporary register ID to add
+  /// - TempRegFlags - The register flags to set
   GIR_AddTempRegister,
+
   /// Add an immediate to the specified instruction
   /// - InsnID - Instruction ID to modify
   /// - Imm - The immediate to add
@@ -230,6 +273,7 @@
   /// - InsnID - Instruction ID to modify
   /// - RendererID - The renderer to call
   GIR_ComplexRenderer,
+
   /// Render sub-operands of complex operands to the specified instruction
   /// - InsnID - Instruction ID to modify
   /// - RendererID - The renderer to call
@@ -247,24 +291,34 @@
   /// The operand index is implicitly 1.
   GIR_CopyConstantAsSImm,
 
+  /// Render a G_FCONSTANT operator as a sign-extended immediate.
+  /// - NewInsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to copy from
+  /// The operand index is implicitly 1.
+  GIR_CopyFConstantAsFPImm,
+
   /// Constrain an instruction operand to a register class.
   /// - InsnID - Instruction ID to modify
   /// - OpIdx - Operand index
   /// - RCEnum - Register class enumeration value
   GIR_ConstrainOperandRC,
+
   /// Constrain an instructions operands according to the instruction
   /// description.
   /// - InsnID - Instruction ID to modify
   GIR_ConstrainSelectedInstOperands,
+
   /// Merge all memory operands into instruction.
   /// - InsnID - Instruction ID to modify
   /// - MergeInsnID... - One or more Instruction ID to merge into the result.
   /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
   ///                                    merge.
   GIR_MergeMemOperands,
+
   /// Erase from parent.
   /// - InsnID - Instruction ID to erase
   GIR_EraseFromParent,
+
   /// Create a new temporary register that's not constrained.
   /// - TempRegID - The temporary register ID to initialize.
   /// - Expected type
@@ -276,6 +330,9 @@
   /// Increment the rule coverage counter.
   /// - RuleID - The ID of the rule that was covered.
   GIR_Coverage,
+
+  /// Keeps track of the number of GI opcodes. Must be the last entry.
+  GIU_NumOpcodes,
 };
 
 enum {
@@ -319,10 +376,24 @@
   template <class PredicateBitset, class ComplexMatcherMemFn,
             class CustomRendererFn>
   struct ISelInfoTy {
+    ISelInfoTy(const LLT *TypeObjects, size_t NumTypeObjects,
+               const PredicateBitset *FeatureBitsets,
+               const ComplexMatcherMemFn *ComplexPredicates,
+               const CustomRendererFn *CustomRenderers)
+        : TypeObjects(TypeObjects),
+          FeatureBitsets(FeatureBitsets),
+          ComplexPredicates(ComplexPredicates),
+          CustomRenderers(CustomRenderers) {
+
+      for (size_t I = 0; I < NumTypeObjects; ++I)
+        TypeIDMap[TypeObjects[I]] = I;
+    }
     const LLT *TypeObjects;
     const PredicateBitset *FeatureBitsets;
     const ComplexMatcherMemFn *ComplexPredicates;
     const CustomRendererFn *CustomRenderers;
+
+    SmallDenseMap<LLT, unsigned, 64> TypeIDMap;
   };
 
 protected:
@@ -341,14 +412,25 @@
       const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
       CodeGenCoverage &CoverageInfo) const;
 
+  virtual const int64_t *getMatchTable() const {
+    llvm_unreachable("Should have been overridden by tablegen if used");
+  }
+
   virtual bool testImmPredicate_I64(unsigned, int64_t) const {
-    llvm_unreachable("Subclasses must override this to use tablegen");
+    llvm_unreachable(
+        "Subclasses must override this with a tablegen-erated function");
   }
   virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
-    llvm_unreachable("Subclasses must override this to use tablegen");
+    llvm_unreachable(
+        "Subclasses must override this with a tablegen-erated function");
   }
   virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
-    llvm_unreachable("Subclasses must override this to use tablegen");
+    llvm_unreachable(
+        "Subclasses must override this with a tablegen-erated function");
+  }
+  virtual bool testMIPredicate_MI(unsigned, const MachineInstr &) const {
+    llvm_unreachable(
+        "Subclasses must override this with a tablegen-erated function");
   }
 
   /// Constrain a register operand of an instruction \p I to a specified
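
GIM_SwitchOpcode replaces a chain of GIM_Try/GIM_CheckOpcode blocks with a jump table. A hand-written sketch of the encoding the interpreter walks; real tables are tablegen-erated, and opcodes 100-102 are placeholders. Every value is an index into the same array, and a zero jump-table entry falls through to Default:

const int64_t MatchTable[] = {
    GIM_SwitchOpcode, /*MI*/ 0, /*LowerBound*/ 100, /*UpperBound*/ 103,
    /*Default*/ 10,
    /*opcode 100 ->*/ 8, /*opcode 101 ->*/ 0, /*opcode 102 ->*/ 9,
    /*8:*/ GIM_Reject,  // rules for opcode 100 would go here
    /*9:*/ GIM_Reject,  // rules for opcode 102 would go here
    /*10:*/ GIM_Reject, // Default: no rule matched
};

On a jump-table hit the interpreter also pushes Default onto the try-block stack, so a later reject inside the taken branch resumes at the failure target.
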
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index f7593ba..2003a79 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -41,6 +41,7 @@
   GIPFP_I64_Invalid = 0,
   GIPFP_APInt_Invalid = 0,
   GIPFP_APFloat_Invalid = 0,
+  GIPFP_MI_Invalid = 0,
 };
 
 template <class TgtInstructionSelector, class PredicateBitset,
@@ -53,8 +54,9 @@
     MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
     const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
     CodeGenCoverage &CoverageInfo) const {
+
   uint64_t CurrentIdx = 0;
-  SmallVector<uint64_t, 8> OnFailResumeAt;
+  SmallVector<uint64_t, 4> OnFailResumeAt;
 
   enum RejectAction { RejectAndGiveUp, RejectAndResume };
   auto handleReject = [&]() -> RejectAction {
@@ -62,8 +64,7 @@
                     dbgs() << CurrentIdx << ": Rejected\n");
     if (OnFailResumeAt.empty())
       return RejectAndGiveUp;
-    CurrentIdx = OnFailResumeAt.back();
-    OnFailResumeAt.pop_back();
+    CurrentIdx = OnFailResumeAt.pop_back_val();
     DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                     dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
                            << OnFailResumeAt.size() << " try-blocks remain)\n");
@@ -72,7 +73,8 @@
 
   while (true) {
     assert(CurrentIdx != ~0u && "Invalid MatchTable index");
-    switch (MatchTable[CurrentIdx++]) {
+    int64_t MatcherOpcode = MatchTable[CurrentIdx++];
+    switch (MatcherOpcode) {
     case GIM_Try: {
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": Begin try-block\n");
@@ -138,12 +140,13 @@
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t Expected = MatchTable[CurrentIdx++];
 
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       unsigned Opcode = State.MIs[InsnID]->getOpcode();
+
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
                              << "], ExpectedOpcode=" << Expected
                              << ") // Got=" << Opcode << "\n");
-      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       if (Opcode != Expected) {
         if (handleReject() == RejectAndGiveUp)
           return false;
@@ -151,6 +154,77 @@
       break;
     }
 
+    case GIM_SwitchOpcode: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t LowerBound = MatchTable[CurrentIdx++];
+      int64_t UpperBound = MatchTable[CurrentIdx++];
+      int64_t Default = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      const int64_t Opcode = State.MIs[InsnID]->getOpcode();
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+        dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
+               << LowerBound << ", " << UpperBound << "), Default=" << Default
+               << ", JumpTable...) // Got=" << Opcode << "\n";
+      });
+      if (Opcode < LowerBound || UpperBound <= Opcode) {
+        CurrentIdx = Default;
+        break;
+      }
+      CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
+      if (!CurrentIdx) {
+        CurrentIdx = Default;
+        break;
+      }
+      OnFailResumeAt.push_back(Default);
+      break;
+    }
+
+    case GIM_SwitchType: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t LowerBound = MatchTable[CurrentIdx++];
+      int64_t UpperBound = MatchTable[CurrentIdx++];
+      int64_t Default = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
+        dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
+               << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
+               << UpperBound << "), Default=" << Default
+               << ", JumpTable...) // Got=";
+        if (!MO.isReg())
+          dbgs() << "Not a VReg\n";
+        else
+          dbgs() << MRI.getType(MO.getReg()) << "\n";
+      });
+      if (!MO.isReg()) {
+        CurrentIdx = Default;
+        break;
+      }
+      const LLT Ty = MRI.getType(MO.getReg());
+      const auto TyI = ISelInfo.TypeIDMap.find(Ty);
+      if (TyI == ISelInfo.TypeIDMap.end()) {
+        CurrentIdx = Default;
+        break;
+      }
+      const int64_t TypeID = TyI->second;
+      if (TypeID < LowerBound || UpperBound <= TypeID) {
+        CurrentIdx = Default;
+        break;
+      }
+      CurrentIdx = MatchTable[CurrentIdx + (TypeID - LowerBound)];
+      if (!CurrentIdx) {
+        CurrentIdx = Default;
+        break;
+      }
+      OnFailResumeAt.push_back(Default);
+      break;
+    }
+
     case GIM_CheckNumOperands: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t Expected = MatchTable[CurrentIdx++];
@@ -196,7 +270,8 @@
                           << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
                           << InsnID << "], Predicate=" << Predicate << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-      assert(State.MIs[InsnID]->getOpcode() && "Expected G_CONSTANT");
+      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
+             "Expected G_CONSTANT");
       assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
       APInt Value;
       if (State.MIs[InsnID]->getOperand(1).isCImm())
@@ -228,6 +303,21 @@
           return false;
       break;
     }
+    case GIM_CheckCxxInsnPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t Predicate = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs()
+                          << CurrentIdx << ": GIM_CheckCxxInsnPredicate(MIs["
+                          << InsnID << "], Predicate=" << Predicate << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
+
+      if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID]))
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
     case GIM_CheckAtomicOrdering: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
@@ -235,7 +325,6 @@
                       dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
                              << InsnID << "], " << (uint64_t)Ordering << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       if (!State.MIs[InsnID]->hasOneMemOperand())
         if (handleReject() == RejectAndGiveUp)
           return false;
@@ -254,7 +343,6 @@
                              << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
                              << InsnID << "], " << (uint64_t)Ordering << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       if (!State.MIs[InsnID]->hasOneMemOperand())
         if (handleReject() == RejectAndGiveUp)
           return false;
@@ -273,7 +361,6 @@
                              << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
                              << InsnID << "], " << (uint64_t)Ordering << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       if (!State.MIs[InsnID]->hasOneMemOperand())
         if (handleReject() == RejectAndGiveUp)
           return false;
@@ -284,6 +371,87 @@
             return false;
       break;
     }
+    case GIM_CheckMemorySizeEqualTo: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      uint64_t Size = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckMemorySizeEqualTo(MIs[" << InsnID
+                             << "]->memoperands() + " << MMOIdx
+                             << ", Size=" << Size << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << MMO->getSize() << " bytes vs " << Size
+                             << " bytes\n");
+      if (MMO->getSize() != Size)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      break;
+    }
+    case GIM_CheckMemorySizeEqualToLLT:
+    case GIM_CheckMemorySizeLessThanLLT:
+    case GIM_CheckMemorySizeGreaterThanLLT: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(
+          TgtInstructionSelector::getName(),
+          dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
+                 << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT
+                         ? "EqualTo"
+                         : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
+                               ? "GreaterThan"
+                               : "LessThan")
+                 << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+                 << ", OpIdx=" << OpIdx << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isReg()) {
+        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                        dbgs() << CurrentIdx << ": Not a register\n");
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+
+      unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
+      if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
+          MMO->getSize() * 8 != Size) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
+                 MMO->getSize() * 8 >= Size) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
+                 MMO->getSize() * 8 <= Size)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+
+      break;
+    }
     case GIM_CheckType: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -293,7 +461,6 @@
                              << "]->getOperand(" << OpIdx
                              << "), TypeID=" << TypeID << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
       if (!MO.isReg() ||
           MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
@@ -312,7 +479,6 @@
                              << InsnID << "]->getOperand(" << OpIdx
                              << "), SizeInBits=" << SizeInBits << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       // iPTR must be looked up in the target.
       if (SizeInBits == 0) {
         MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
@@ -384,7 +550,6 @@
                              << InsnID << "]->getOperand(" << OpIdx
                              << "), Value=" << Value << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
       MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
       if (MO.isReg()) {
         // isOperandImmEqual() will sign-extend to 64-bits, so should we.
@@ -480,7 +645,7 @@
     }
     case GIM_Reject:
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
-                      dbgs() << CurrentIdx << ": GIM_Reject");
+                      dbgs() << CurrentIdx << ": GIM_Reject\n");
       if (handleReject() == RejectAndGiveUp)
         return false;
       break;
@@ -662,6 +827,23 @@
       break;
     }
 
+    // TODO: Needs a test case once we have a pattern that uses this.
+    case GIR_CopyFConstantAsFPImm: {
+      int64_t NewInsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT && "Expected G_FCONSTANT");
+      if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
+        OutMIs[NewInsnID].addFPImm(
+            State.MIs[OldInsnID]->getOperand(1).getFPImm());
+      else
+        llvm_unreachable("Expected FPImm operand");
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIR_CopyFConstantAsFPImm(OutMIs[
+                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
+      break;
+    }
+
     case GIR_CustomRenderer: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OldInsnID = MatchTable[CurrentIdx++];
@@ -755,7 +937,7 @@
 
     case GIR_Done:
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
-                      dbgs() << CurrentIdx << ": GIR_Done");
+                      dbgs() << CurrentIdx << ": GIR_Done\n");
       return true;
 
     default:
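
The three *LLT checks compare an MMO's byte size, scaled to bits, against the LLT of an operand; this is what lets imported patterns tell an extending load from a plain one. The same comparison restated standalone (a sketch; assumes a load-like MI with its result in operand 0 and exactly one memory operand):

static bool loadIsExtending(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) {
  const MachineMemOperand *MMO = *MI.memoperands_begin();
  unsigned DstBits = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
  // GIM_CheckMemorySizeLessThanLLT succeeds exactly when this holds:
  return MMO->getSize() * 8 < DstBits;
}
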
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index a1f564b..8735876 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -38,7 +38,7 @@
       return false;
     if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_TRUNC,
                                            MI.getOperand(1).getReg(), MRI)) {
-      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       unsigned DstReg = MI.getOperand(0).getReg();
       unsigned SrcReg = DefMI->getOperand(1).getReg();
       Builder.setInstr(MI);
@@ -62,7 +62,7 @@
       if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
           isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
         return false;
-      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       Builder.setInstr(MI);
       unsigned ZExtSrc = MI.getOperand(1).getReg();
       LLT ZExtSrcTy = MRI.getType(ZExtSrc);
@@ -91,7 +91,7 @@
           isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
           isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
         return false;
-      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       Builder.setInstr(MI);
       unsigned SExtSrc = MI.getOperand(1).getReg();
       LLT SExtSrcTy = MRI.getType(SExtSrc);
@@ -123,7 +123,7 @@
       LLT DstTy = MRI.getType(DstReg);
       if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
         return false;
-      DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
+      LLVM_DEBUG(dbgs() << ".. Combine EXT(IMPLICIT_DEF) " << MI;);
       Builder.setInstr(MI);
       Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, DstReg);
       markInstAndDefDead(MI, *DefMI, DeadInsts);
@@ -139,9 +139,9 @@
       return false;
 
     unsigned NumDefs = MI.getNumOperands() - 1;
-    unsigned SrcReg = MI.getOperand(NumDefs).getReg();
-    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
-    if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+    MachineInstr *MergeI = getOpcodeDef(TargetOpcode::G_MERGE_VALUES,
+                                        MI.getOperand(NumDefs).getReg(), MRI);
+    if (!MergeI)
       return false;
 
     const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
@@ -253,11 +253,8 @@
     // and as a result, %3, %2, %1 are dead.
     MachineInstr *PrevMI = &MI;
     while (PrevMI != &DefMI) {
-      // If we're dealing with G_UNMERGE_VALUES, tryCombineMerges doesn't really try
-      // to fold copies in between and we can ignore them here.
-      if (PrevMI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES)
-        break;
-      unsigned PrevRegSrc = PrevMI->getOperand(1).getReg();
+      unsigned PrevRegSrc =
+          PrevMI->getOperand(PrevMI->getNumOperands() - 1).getReg();
       MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
       if (MRI.hasOneUse(PrevRegSrc)) {
         if (TmpDef != &DefMI) {
@@ -269,9 +266,7 @@
         break;
       PrevMI = TmpDef;
     }
-    if ((PrevMI == &DefMI ||
-         DefMI.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
-        MRI.hasOneUse(DefMI.getOperand(0).getReg()))
+    if (PrevMI == &DefMI && MRI.hasOneUse(DefMI.getOperand(0).getReg()))
       DeadInsts.push_back(&DefMI);
   }
 
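The reworked walk follows each intermediate instruction's last operand back to the defining G_MERGE_VALUES, so single-use copies between the merge and the unmerge no longer stop dead-code cleanup. A sketch of the shape it cleans up; Lo and Hi are assumed to be s32 vregs defined elsewhere:

void buildMergeUnmergePair(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                           unsigned Lo, unsigned Hi) {
  unsigned Merged = MRI.createGenericVirtualRegister(LLT::scalar(64));
  unsigned OutLo = MRI.createGenericVirtualRegister(LLT::scalar(32));
  unsigned OutHi = MRI.createGenericVirtualRegister(LLT::scalar(32));
  B.buildMerge(Merged, {Lo, Hi});
  B.buildUnmerge({OutLo, OutHi}, Merged);
  // tryCombineMerges rewrites OutLo/OutHi as copies of Lo/Hi and marks
  // the merge (plus any single-use instructions in between) dead.
}
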
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 8bd8a9d..d122e67 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -93,12 +93,24 @@
   const LegalizerInfo &getLegalizerInfo() const { return LI; }
 
 private:
+  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+  /// Use by extending the operand's type to \p WideTy using the specified \p
+  /// ExtOpcode for the extension instruction, and replacing the vreg of the
+  /// operand in place.
+  void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
+                      unsigned ExtOpcode);
+
+  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+  /// Def by extending the operand's type to \p WideTy and truncating it back
+  /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
+  void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
+                      unsigned TruncOpcode = TargetOpcode::G_TRUNC);
 
   /// Helper function to split a wide generic register into bitwise blocks with
   /// the given Type (which implies the number of blocks needed). The generic
   /// registers created are appended to Ops, starting at bit 0 of Reg.
   void extractParts(unsigned Reg, LLT Ty, int NumParts,
-                    SmallVectorImpl<unsigned> &Ops);
+                    SmallVectorImpl<unsigned> &VRegs);
 
   MachineRegisterInfo &MRI;
   const LegalizerInfo &LI;
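
The two helpers split the usual widenScalar recipe into per-operand steps. The call sequence for widening, say, a G_ADD on s8 to s32, restated for illustration (these are private members of LegalizerHelper, so this only sketches what widenScalar does internally):

// Extend both uses in place, then rewrite the def to a wide vreg that
// is truncated back to the original type:
widenScalarSrc(MI, LLT::scalar(32), /*OpIdx=*/1, TargetOpcode::G_ANYEXT);
widenScalarSrc(MI, LLT::scalar(32), /*OpIdx=*/2, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, LLT::scalar(32)); // OpIdx 0, truncated back via G_TRUNC
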
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 117c791..a8c2608 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -19,6 +19,7 @@
 #include "llvm/ADT/None.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
@@ -37,6 +38,7 @@
 class MachineInstr;
 class MachineIRBuilder;
 class MachineRegisterInfo;
+class MCInstrInfo;
 
 namespace LegalizeActions {
 enum LegalizeAction : std::uint8_t {
@@ -118,6 +120,21 @@
   unsigned Opcode;
   ArrayRef<LLT> Types;
 
+  struct MemDesc {
+    uint64_t Size;
+    AtomicOrdering Ordering;
+  };
+
+  /// Operations which require memory can use this to place requirements on the
+  /// memory type for each MMO.
+  ArrayRef<MemDesc> MMODescrs;
+
+  constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types,
+                          const ArrayRef<MemDesc> MMODescrs)
+      : Opcode(Opcode), Types(Types), MMODescrs(MMODescrs) {}
+  constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types)
+      : LegalityQuery(Opcode, Types, {}) {}
+
   raw_ostream &print(raw_ostream &OS) const;
 };
 
@@ -147,8 +164,31 @@
     std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
 
 namespace LegalityPredicates {
+struct TypePairAndMemSize {
+  LLT Type0;
+  LLT Type1;
+  uint64_t MemSize;
+
+  bool operator==(const TypePairAndMemSize &Other) const {
+    return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+           MemSize == Other.MemSize;
+  }
+};
+
 /// True iff P0 and P1 are true.
-LegalityPredicate all(LegalityPredicate P0, LegalityPredicate P1);
+template<typename Predicate>
+Predicate all(Predicate P0, Predicate P1) {
+  return [=](const LegalityQuery &Query) {
+    return P0(Query) && P1(Query);
+  };
+}
+/// True iff all given predicates are true.
+template<typename Predicate, typename... Args>
+Predicate all(Predicate P0, Predicate P1, Args... args) {
+  return all(all(P0, P1), args...);
+}
+/// True iff the given type index is the specified type.
+LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
 /// True iff the given type index is one of the specified types.
 LegalityPredicate typeInSet(unsigned TypeIdx,
                             std::initializer_list<LLT> TypesInit);
@@ -157,6 +197,11 @@
 LegalityPredicate
 typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
               std::initializer_list<std::pair<LLT, LLT>> TypesInit);
+/// True iff the given types for the given pair of type indexes, together with
+/// the MMO size, form one of the specified type-and-memory-size tuples.
+LegalityPredicate typePairAndMemSizeInSet(
+    unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
+    std::initializer_list<TypePairAndMemSize> TypesAndMemSizeInit);
 /// True iff the specified type index is a scalar.
 LegalityPredicate isScalar(unsigned TypeIdx);
 /// True iff the specified type index is a scalar that's narrower than the given
@@ -168,14 +213,22 @@
 /// True iff the specified type index is a scalar whose size is not a power of
 /// 2.
 LegalityPredicate sizeNotPow2(unsigned TypeIdx);
+/// True iff the specified MMO index has a size that is not a power of 2.
+LegalityPredicate memSizeInBytesNotPow2(unsigned MMOIdx);
 /// True iff the specified type index is a vector whose element count is not a
 /// power of 2.
 LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
+/// True iff the specified MMO index has an atomic ordering of Ordering or
+/// stronger.
+LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx,
+                                                      AtomicOrdering Ordering);
 } // end namespace LegalityPredicates
 
 namespace LegalizeMutations {
 /// Select this specific type for the given type index.
 LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);
+/// Change the given type index to use the same type as another type index.
+LegalizeMutation changeTo(unsigned TypeIdx, unsigned FromTypeIdx);
 /// Widen the type for the given type index to the next power of 2.
 LegalizeMutation widenScalarToNextPow2(unsigned TypeIdx, unsigned Min = 0);
 /// Add more elements to the type for the given type index to the next power of
@@ -219,6 +272,33 @@
   bool IsAliasedByAnother;
   SmallVector<LegalizeRule, 2> Rules;
 
+#ifndef NDEBUG
+  /// If bit I is set, this rule set contains a rule that may handle type
+  /// index I (by predicating on it, acting upon it, or both). The uncertainty
+  /// comes from free-form rules executing user-provided lambda functions. We
+  /// conservatively assume such rules do the right thing and cover all type
+  /// indices. The bitset is intentionally 1 bit wider than it absolutely needs
+  /// to be to distinguish such cases from the cases where all type indices are
+  /// individually handled.
+  SmallBitVector TypeIdxsCovered{MCOI::OPERAND_LAST_GENERIC -
+                                 MCOI::OPERAND_FIRST_GENERIC + 2};
+#endif
+
+  unsigned typeIdx(unsigned TypeIdx) {
+    assert(TypeIdx <=
+               (MCOI::OPERAND_LAST_GENERIC - MCOI::OPERAND_FIRST_GENERIC) &&
+           "Type Index is out of bounds");
+#ifndef NDEBUG
+    TypeIdxsCovered.set(TypeIdx);
+#endif
+    return TypeIdx;
+  }
+  void markAllTypeIdxsAsCovered() {
+#ifndef NDEBUG
+    TypeIdxsCovered.set();
+#endif
+  }
+
   void add(const LegalizeRule &Rule) {
     assert(AliasOf == 0 &&
            "RuleSet is aliased, change the representative opcode instead");
@@ -235,7 +315,7 @@
     return *this;
   }
   /// Use the given action when the predicate is true.
-  /// Action should not be an action that requires mutation.
+  /// Action should be an action that requires mutation.
   LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
                             LegalizeMutation Mutation) {
     add({Predicate, Action, Mutation});
@@ -246,16 +326,33 @@
   LegalizeRuleSet &actionFor(LegalizeAction Action,
                              std::initializer_list<LLT> Types) {
     using namespace LegalityPredicates;
-    return actionIf(Action, typeInSet(0, Types));
+    return actionIf(Action, typeInSet(typeIdx(0), Types));
+  }
+  /// Use the given action when type index 0 is any type in the given list.
+  /// Action should be an action that requires mutation.
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<LLT> Types,
+                             LegalizeMutation Mutation) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typeInSet(typeIdx(0), Types), Mutation);
   }
   /// Use the given action when type indexes 0 and 1 form any type pair in
   /// the given list.
   /// Action should not be an action that requires mutation.
-  LegalizeRuleSet &
-  actionFor(LegalizeAction Action,
-            std::initializer_list<std::pair<LLT, LLT>> Types) {
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<std::pair<LLT, LLT>> Types) {
     using namespace LegalityPredicates;
-    return actionIf(Action, typePairInSet(0, 1, Types));
+    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
+  }
+  /// Use the given action when type indexes 0 and 1 form any type pair in
+  /// the given list.
+  /// Action should be an action that requires mutation.
+  LegalizeRuleSet &actionFor(LegalizeAction Action,
+                             std::initializer_list<std::pair<LLT, LLT>> Types,
+                             LegalizeMutation Mutation) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types),
+                    Mutation);
   }
   /// Use the given action when type indexes 0 and 1 are both in the given list.
   /// That is, the type pair is in the cartesian product of the list.
@@ -263,10 +360,11 @@
   LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
                                              std::initializer_list<LLT> Types) {
     using namespace LegalityPredicates;
-    return actionIf(Action, all(typeInSet(0, Types), typeInSet(1, Types)));
+    return actionIf(Action, all(typeInSet(typeIdx(0), Types),
+                                typeInSet(typeIdx(1), Types)));
   }
-  /// Use the given action when type indexes 0 and 1 are both their respective
-  /// lists.
+  /// Use the given action when type indexes 0 and 1 are both in their
+  /// respective lists.
   /// That is, the type pair is in the cartesian product of the lists
   /// Action should not be an action that requires mutation.
   LegalizeRuleSet &
@@ -274,7 +372,20 @@
                             std::initializer_list<LLT> Types0,
                             std::initializer_list<LLT> Types1) {
     using namespace LegalityPredicates;
-    return actionIf(Action, all(typeInSet(0, Types0), typeInSet(1, Types1)));
+    return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+                                typeInSet(typeIdx(1), Types1)));
+  }
+  /// Use the given action when type indexes 0, 1, and 2 are all in their
+  /// respective lists.
+  /// That is, the type triple is in the cartesian product of the lists
+  /// Action should not be an action that requires mutation.
+  LegalizeRuleSet &actionForCartesianProduct(
+      LegalizeAction Action, std::initializer_list<LLT> Types0,
+      std::initializer_list<LLT> Types1, std::initializer_list<LLT> Types2) {
+    using namespace LegalityPredicates;
+    return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
+                                all(typeInSet(typeIdx(1), Types1),
+                                    typeInSet(typeIdx(2), Types2))));
   }
 
 public:
@@ -292,6 +403,9 @@
 
   /// The instruction is legal if predicate is true.
   LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that the free-form
+    // user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::Legal, Predicate);
   }
   /// The instruction is legal when type index 0 is any type in the given list.
@@ -303,6 +417,15 @@
   LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
     return actionFor(LegalizeAction::Legal, Types);
   }
+  /// The instruction is legal when type indexes 0 and 1 along with the memory
+  /// size is any type and size tuple in the given list.
+  LegalizeRuleSet &legalForTypesWithMemSize(
+      std::initializer_list<LegalityPredicates::TypePairAndMemSize>
+          TypesAndMemSize) {
+    return actionIf(LegalizeAction::Legal,
+                    LegalityPredicates::typePairAndMemSizeInSet(
+                        typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemSize));
+  }
   /// The instruction is legal when type indexes 0 and 1 are both in the given
   /// list. That is, the type pair is in the cartesian product of the list.
   LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
@@ -315,8 +438,77 @@
     return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
   }
 
+  /// The instruction is lowered.
+  LegalizeRuleSet &lower() {
+    using namespace LegalizeMutations;
+    // We have no choice but to conservatively assume that predicate-less
+    // lowering properly handles all type indices by design:
+    markAllTypeIdxsAsCovered();
+    return actionIf(LegalizeAction::Lower, always);
+  }
+  /// The instruction is lowered if predicate is true. Keep type index 0 as the
+  /// same type.
+  LegalizeRuleSet &lowerIf(LegalityPredicate Predicate) {
+    using namespace LegalizeMutations;
+    // We have no choice but to conservatively assume that lowering with a
+    // free-form user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
+    return actionIf(LegalizeAction::Lower, Predicate);
+  }
+  /// The instruction is lowered if predicate is true.
+  LegalizeRuleSet &lowerIf(LegalityPredicate Predicate,
+                           LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that lowering with a
+    // free-form user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
+    return actionIf(LegalizeAction::Lower, Predicate, Mutation);
+  }
+  /// The instruction is lowered when type index 0 is any type in the given
+  /// list. Keep type index 0 as the same type.
+  LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Lower, Types,
+                     LegalizeMutations::changeTo(0, 0));
+  }
+  /// The instruction is lowered when type index 0 is any type in the given
+  /// list.
+  LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types,
+                            LegalizeMutation Mutation) {
+    return actionFor(LegalizeAction::Lower, Types, Mutation);
+  }
+  /// The instruction is lowered when type indexes 0 and 1 form any type pair
+  /// in the given list. Keep type index 0 as the same type.
+  LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Lower, Types,
+                     LegalizeMutations::changeTo(0, 0));
+  }
+  /// The instruction is lowered when type indexes 0 and 1 form any type pair
+  /// in the given list.
+  LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+                            LegalizeMutation Mutation) {
+    return actionFor(LegalizeAction::Lower, Types, Mutation);
+  }
+  /// The instruction is lowered when type indexes 0 and 1 are both in their
+  /// respective lists.
+  LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1) {
+    using namespace LegalityPredicates;
+    return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1);
+  }
+  /// The instruction is lowered when type indexes 0, 1, and 2 are all in
+  /// their respective lists.
+  LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1,
+                                            std::initializer_list<LLT> Types2) {
+    using namespace LegalityPredicates;
+    return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1,
+                                     Types2);
+  }
+
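For orientation, the new lower* rules compose like the legal* ones above; a target's rule setup might look like the following sketch (the opcodes, types, and the surrounding getActionDefinitionsBuilder call are illustrative assumptions, not part of this header):

    // Sketch: inside a hypothetical target LegalizerInfo constructor.
    using namespace TargetOpcode;
    getActionDefinitionsBuilder(G_SMULO)
        .lowerFor({LLT::scalar(32), LLT::scalar(64)});
    getActionDefinitionsBuilder(G_FNEG)
        // Free-form predicate; as the comment above notes, this
        // conservatively marks all type indices as covered.
        .lowerIf([](const LegalityQuery &Query) {
          return Query.Types[0].isVector();
        });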
   /// Like legalIf, but for the Libcall action.
   LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that a libcall with a
+    // free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::Libcall, Predicate);
   }
   LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
@@ -340,12 +532,18 @@
   /// true.
   LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
                                  LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
   }
   /// Narrow the scalar to the one selected by the mutation if the predicate is
   /// true.
   LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
                                   LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
   }
 
@@ -353,12 +551,18 @@
   /// predicate is true.
   LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
                                   LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
   }
   /// Remove elements to reach the type selected by the mutation if the
   /// predicate is true.
   LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
                                    LegalizeMutation Mutation) {
+    // We have no choice but to conservatively assume that an action with a
+    // free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
   }
 
@@ -369,8 +573,15 @@
   LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
     return actionIf(LegalizeAction::Unsupported, Predicate);
   }
+  /// The instruction is unsupported if the memory access size in bytes is
+  /// not a power of 2.
+  LegalizeRuleSet &unsupportedIfMemSizeNotPow2() {
+    return actionIf(LegalizeAction::Unsupported,
+                    LegalityPredicates::memSizeInBytesNotPow2(0));
+  }
 
   LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
+    // We have no choice but to conservatively assume that a custom action with
+    // a free-form, user-provided Predicate properly handles all type indices:
+    markAllTypeIdxsAsCovered();
     return actionIf(LegalizeAction::Custom, Predicate);
   }
   LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
@@ -387,54 +598,57 @@
 
   /// Widen the scalar to the next power of two that is at least MinSize.
   /// No effect if the type is not a scalar or is a power of two.
-  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize = 0) {
+  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx,
+                                         unsigned MinSize = 0) {
     using namespace LegalityPredicates;
-    return widenScalarIf(
-        sizeNotPow2(TypeIdx),
-        LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
+    return actionIf(LegalizeAction::WidenScalar, sizeNotPow2(typeIdx(TypeIdx)),
+                    LegalizeMutations::widenScalarToNextPow2(TypeIdx, MinSize));
   }
 
   LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
     using namespace LegalityPredicates;
-    return narrowScalarIf(isScalar(TypeIdx), Mutation);
+    return actionIf(LegalizeAction::NarrowScalar, isScalar(typeIdx(TypeIdx)),
+                    Mutation);
   }
 
   /// Ensure the scalar is at least as wide as Ty.
   LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
-    return widenScalarIf(narrowerThan(TypeIdx, Ty.getSizeInBits()),
-                         changeTo(TypeIdx, Ty));
+    return actionIf(LegalizeAction::WidenScalar,
+                    narrowerThan(TypeIdx, Ty.getSizeInBits()),
+                    changeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the scalar is at most as wide as Ty.
   LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
-    return narrowScalarIf(widerThan(TypeIdx, Ty.getSizeInBits()),
-                          changeTo(TypeIdx, Ty));
+    return actionIf(LegalizeAction::NarrowScalar,
+                    widerThan(TypeIdx, Ty.getSizeInBits()),
+                    changeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Conditionally limit the maximum size of the scalar.
   /// For example, when the maximum size of one type depends on the size of
   /// another, such as extracting N bits from an M-bit container.
-  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT &Ty) {
+  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
+                               const LLT &Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
-    return narrowScalarIf(
-        [=](const LegalityQuery &Query) {
-          return widerThan(TypeIdx, Ty.getSizeInBits()) &&
-                 Predicate(Query);
-        },
-        changeTo(TypeIdx, Ty));
+    return actionIf(LegalizeAction::NarrowScalar,
+                    [=](const LegalityQuery &Query) {
+                      return widerThan(TypeIdx, Ty.getSizeInBits()) &&
+                             Predicate(Query);
+                    },
+                    changeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Limit the range of scalar sizes to MinTy and MaxTy.
-  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy, const LLT &MaxTy) {
+  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy,
+                               const LLT &MaxTy) {
     assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
-
-    return minScalar(TypeIdx, MinTy)
-        .maxScalar(TypeIdx, MaxTy);
+    return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
   }
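Taken together, minScalar/maxScalar/clampScalar and widenScalarToNextPow2 let a target state its scalar-width constraints declaratively; a sketch with illustrative types and opcode:

    // Sketch: accept s32/s64, clamp other widths into that range, then
    // round any remaining odd width up to a power of 2.
    getActionDefinitionsBuilder(TargetOpcode::G_ADD)
        .legalFor({LLT::scalar(32), LLT::scalar(64)})
        .clampScalar(0, LLT::scalar(32), LLT::scalar(64))
        .widenScalarToNextPow2(0);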
 
   /// Add more elements to the vector to reach the next power of two.
@@ -442,17 +656,21 @@
   /// two.
   LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
     using namespace LegalityPredicates;
-    return moreElementsIf(numElementsNotPow2(TypeIdx),
-                          LegalizeMutations::moreElementsToNextPow2(TypeIdx));
+    return actionIf(LegalizeAction::MoreElements,
+                    numElementsNotPow2(typeIdx(TypeIdx)),
+                    LegalizeMutations::moreElementsToNextPow2(TypeIdx));
   }
 
   /// Ensure the number of elements in EltTy vectors is at least MinElements.
   LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
                                        unsigned MinElements) {
-    return moreElementsIf(
+    // Mark the type index as covered:
+    typeIdx(TypeIdx);
+    return actionIf(
+        LegalizeAction::MoreElements,
         [=](const LegalityQuery &Query) {
           LLT VecTy = Query.Types[TypeIdx];
-          return VecTy.getElementType() == EltTy &&
+          return VecTy.isVector() && VecTy.getElementType() == EltTy &&
                  VecTy.getNumElements() < MinElements;
         },
         [=](const LegalityQuery &Query) {
@@ -464,10 +682,13 @@
   /// Limit the number of elements in EltTy vectors to at most MaxElements.
   LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
                                        unsigned MaxElements) {
-    return fewerElementsIf(
+    // Mark the type index as covered:
+    typeIdx(TypeIdx);
+    return actionIf(
+        LegalizeAction::FewerElements,
         [=](const LegalityQuery &Query) {
           LLT VecTy = Query.Types[TypeIdx];
-          return VecTy.getElementType() == EltTy &&
+          return VecTy.isVector() && VecTy.getElementType() == EltTy &&
                  VecTy.getNumElements() > MaxElements;
         },
         [=](const LegalityQuery &Query) {
@@ -499,6 +720,11 @@
     return *this;
   }
 
+  /// Check that no type index is left obviously unhandled by this
+  /// LegalizeRuleSet in any way at all.
+  /// \pre Type indices of the opcode form a dense [0, \p NumTypeIdxs) set.
+  bool verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const;
+
   /// Apply the ruleset to the given LegalityQuery.
   LegalizeActionStep apply(const LegalityQuery &Query) const;
 };
@@ -516,6 +742,10 @@
   /// before any query is made or incorrect results may be returned.
   void computeTables();
 
+  /// Perform a simple self-diagnostic and assert if there is anything
+  /// obviously wrong with the actions that have been set up.
+  void verify(const MCInstrInfo &MII) const;
+
   static bool needsLegalizingToDifferentSize(const LegalizeAction Action) {
     using namespace LegalizeActions;
     switch (Action) {
@@ -556,7 +786,7 @@
   /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
   /// setLegalizeScalarToDifferentSizeStrategy(
   ///   G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
-  /// will end up defining getAction({G_ADD, 0, T}) to return the following 
+  /// will end up defining getAction({G_ADD, 0, T}) to return the following
   /// actions for different scalar types T:
   ///  LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
   ///  LLT::scalar(32):                 {Legal, 0, LLT::scalar(32)}
@@ -584,7 +814,7 @@
     VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
   }
 
-  /// A SizeChangeStrategy for the common case where legalization for a 
+  /// A SizeChangeStrategy for the common case where legalization for a
   /// particular operation consists of only supporting a specific set of type
   /// sizes. E.g.
   ///   setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
index 0a46eb9..1e2d476 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -70,6 +70,8 @@
         .set(MachineFunctionProperties::Property::RegBankSelected);
   }
 
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
   bool runOnMachineFunction(MachineFunction &MF) override;
 };
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 797f5e5..f77f9a8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -222,6 +222,12 @@
 }
 
 template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>
+m_GFSub(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
 inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
 m_GAnd(const LHS &L, const RHS &R) {
   return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
@@ -305,6 +311,11 @@
 }
 
 template <typename SrcTy>
+inline UnaryOp_match<SrcTy, TargetOpcode::G_FNEG> m_GFNeg(const SrcTy &Src) {
+  return UnaryOp_match<SrcTy, TargetOpcode::G_FNEG>(Src);
+}
+
+template <typename SrcTy>
 inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
   return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
 }
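As a usage sketch for the two new matchers (DstReg and MRI are assumed to be provided by the caller; mi_match and the binding form of m_Reg come from this header):

    // Match DstReg = G_FNEG(G_FSUB(X, Y)), e.g. to rewrite it as Y - X.
    unsigned X = 0, Y = 0;
    if (mi_match(DstReg, MRI, m_GFNeg(m_GFSub(m_Reg(X), m_Reg(Y))))) {
      // X and Y now name the operands of the matched G_FSUB.
    }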
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index ef4e0ad..ac1673d 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -31,11 +31,10 @@
 class MachineInstr;
 class TargetInstrInfo;
 
-/// Helper class to build MachineInstr.
-/// It keeps internally the insertion point and debug location for all
-/// the new instructions we want to create.
-/// This information can be modify via the related setters.
-class MachineIRBuilder {
+/// Class which stores all the state required in a MachineIRBuilder.
+/// Since MachineIRBuilders only store state in this object, the state can be
+/// transferred between different kinds of MachineIRBuilders.
+struct MachineIRBuilderState {
   /// MachineFunction under construction.
   MachineFunction *MF;
   /// Information used to access the description of the opcodes.
@@ -52,15 +51,23 @@
   /// @}
 
   std::function<void(MachineInstr *)> InsertedInstr;
+};
 
+/// Helper class to build MachineInstr.
+/// It internally keeps the insertion point and debug location for all
+/// the new instructions we want to create.
+/// This information can be modified via the related setters.
+class MachineIRBuilderBase {
+
+  MachineIRBuilderState State;
   const TargetInstrInfo &getTII() {
-    assert(TII && "TargetInstrInfo is not set");
-    return *TII;
+    assert(State.TII && "TargetInstrInfo is not set");
+    return *State.TII;
   }
 
   void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
-  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
 
+protected:
   unsigned getDestFromArg(unsigned Reg) { return Reg; }
   unsigned getDestFromArg(LLT Ty) {
     return getMF().getRegInfo().createGenericVirtualRegister(Ty);
@@ -88,30 +95,41 @@
     return MIB->getOperand(0).getReg();
   }
 
+  void validateBinaryOp(unsigned Res, unsigned Op0, unsigned Op1);
+
 public:
   /// Some constructors for easy use.
-  MachineIRBuilder() = default;
-  MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
-  MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+  MachineIRBuilderBase() = default;
+  MachineIRBuilderBase(MachineFunction &MF) { setMF(MF); }
+  MachineIRBuilderBase(MachineInstr &MI) : MachineIRBuilderBase(*MI.getMF()) {
     setInstr(MI);
   }
 
+  MachineIRBuilderBase(const MachineIRBuilderState &BState) : State(BState) {}
+
   /// Getter for the function we currently build.
   MachineFunction &getMF() {
-    assert(MF && "MachineFunction is not set");
-    return *MF;
+    assert(State.MF && "MachineFunction is not set");
+    return *State.MF;
   }
 
+  /// Getter for DebugLoc
+  const DebugLoc &getDL() { return State.DL; }
+
+  /// Getter for MRI
+  MachineRegisterInfo *getMRI() { return State.MRI; }
+
+  /// Getter for the State
+  MachineIRBuilderState &getState() { return State; }
+
   /// Getter for the basic block we currently build.
   MachineBasicBlock &getMBB() {
-    assert(MBB && "MachineBasicBlock is not set");
-    return *MBB;
+    assert(State.MBB && "MachineBasicBlock is not set");
+    return *State.MBB;
   }
 
   /// Current insertion point for new instructions.
-  MachineBasicBlock::iterator getInsertPt() {
-    return II;
-  }
+  MachineBasicBlock::iterator getInsertPt() { return State.II; }
 
   /// Set the insertion point before the specified position.
   /// \pre MBB must be in getMF().
@@ -136,15 +154,16 @@
   /// \name Control where instructions we create are recorded (typically for
   /// visiting again later during legalization).
   /// @{
+  void recordInsertion(MachineInstr *InsertedInstr) const;
   void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
   void stopRecordingInsertions();
   /// @}
 
   /// Set the debug location to \p DL for all the next build instructions.
-  void setDebugLoc(const DebugLoc &DL) { this->DL = DL; }
+  void setDebugLoc(const DebugLoc &DL) { this->State.DL = DL; }
 
   /// Get the current instruction's debug location.
-  DebugLoc getDebugLoc() { return DL; }
+  DebugLoc getDebugLoc() { return State.DL; }
 
   /// Build and insert <empty> = \p Opcode <empty>.
   /// The insertion point is the one set by the last call of either
@@ -155,20 +174,6 @@
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildInstr(unsigned Opcode);
 
-  /// DAG like Generic method for building arbitrary instructions as above.
-  /// \Opc opcode for the instruction.
-  /// \Ty Either LLT/TargetRegisterClass/unsigned types for Dst
-  /// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
-  /// Uses of type MachineInstrBuilder will perform
-  /// getOperand(0).getReg() to convert to register.
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
-                                 UseArgsTy &&... Args) {
-    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
-    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
-    return MIB;
-  }
-
   /// Build but don't insert <empty> = \p Opcode <empty>.
   ///
   /// \pre setMF, setBasicBlock or setMI  must have been called.
@@ -226,59 +231,6 @@
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
 
-  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
-  ///
-  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
-  /// truncated to their width.
-  ///
-  /// \pre setBasicBlock or setMI must have been called.
-  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
-  ///      with the same (scalar or vector) type).
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
-                               unsigned Op1);
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
-    unsigned Res = getDestFromArg(Ty);
-    return buildAdd(Res, (getRegFromArg(UseArgs))...);
-  }
-
-  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
-  ///
-  /// G_SUB sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
-  /// truncated to their width.
-  ///
-  /// \pre setBasicBlock or setMI must have been called.
-  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
-  ///      with the same (scalar or vector) type).
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
-    unsigned Res = getDestFromArg(Ty);
-    return buildSub(Res, (getRegFromArg(UseArgs))...);
-  }
-  MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
-                               unsigned Op1);
-
-  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
-  ///
-  /// G_MUL sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
-  /// truncated to their width.
-  ///
-  /// \pre setBasicBlock or setMI must have been called.
-  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
-  ///      with the same (scalar or vector) type).
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
-    unsigned Res = getDestFromArg(Ty);
-    return buildMul(Res, (getRegFromArg(UseArgs))...);
-  }
-  MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
-                               unsigned Op1);
 
   /// Build and insert \p Res = G_GEP \p Op0, \p Op1
   ///
@@ -347,38 +299,6 @@
   MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
                                  unsigned Op1, unsigned CarryIn);
 
-  /// Build and insert \p Res = G_AND \p Op0, \p Op1
-  ///
-  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
-  /// Op1.
-  ///
-  /// \pre setBasicBlock or setMI must have been called.
-  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
-  ///      with the same (scalar or vector) type).
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
-    return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
-  }
-  MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
-                               unsigned Op1);
-
-  /// Build and insert \p Res = G_OR \p Op0, \p Op1
-  ///
-  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
-  /// Op1.
-  ///
-  /// \pre setBasicBlock or setMI must have been called.
-  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
-  ///      with the same (scalar or vector) type).
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  template <typename DstTy, typename... UseArgsTy>
-  MachineInstrBuilder buildOr(DstTy &&Dst, UseArgsTy &&... UseArgs) {
-    return buildOr(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
-  }
-  MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
 
   /// Build and insert \p Res = G_ANYEXT \p Op0
   ///
@@ -504,7 +424,7 @@
   /// \pre setBasicBlock or setMI must have been called.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildBr(MachineBasicBlock &BB);
+  MachineInstrBuilder buildBr(MachineBasicBlock &Dest);
 
   /// Build and insert G_BRCOND \p Tst, \p Dest
   ///
@@ -518,7 +438,7 @@
   ///      depend on bit 0 (for now).
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
+  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest);
 
   /// Build and insert G_BRINDIRECT \p Tgt
   ///
@@ -602,6 +522,18 @@
   MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
                                 MachineMemOperand &MMO);
 
+  /// Build and insert `Res = <opcode> Addr, MMO`.
+  ///
+  /// Loads the value stored at \p Addr. Puts the result in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res,
+                                     unsigned Addr, MachineMemOperand &MMO);
+
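Presumably the point of the new entry is to share buildLoad's logic with the extending-load opcodes; a hedged usage sketch with placeholder registers and memory operand:

    // Res = G_ZEXTLOAD Addr, with the caller's MachineMemOperand.
    MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Res, Addr, *MMO);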
   /// Build and insert `G_STORE Val, Addr, MMO`.
   ///
   /// Stores the value \p Val to \p Addr.
@@ -626,7 +558,7 @@
   template <typename DstType> MachineInstrBuilder buildUndef(DstType &&Res) {
     return buildUndef(getDestFromArg(Res));
   }
-  MachineInstrBuilder buildUndef(unsigned Dst);
+  MachineInstrBuilder buildUndef(unsigned Res);
 
   /// Build and insert instructions to put \p Ops together at the specified
   /// \p Indices to form a larger register.
@@ -785,7 +717,28 @@
   MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
                                                 unsigned Idx);
 
-  /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+  /// Build and insert `OldValRes<def>, SuccessRes<def> =
+  /// G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
+  /// \p CmpVal, otherwise leave it unchanged. Puts the original value from \p
+  /// Addr in \p OldValRes, along with an s1 indicating whether it was replaced.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register of scalar type.
+  /// \pre \p SuccessRes must be a generic virtual register of scalar type. It
+  ///      will be assigned 0 on failure and 1 on success.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
+  ///      registers of the same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder
+  buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes,
+                                unsigned Addr, unsigned CmpVal, unsigned NewVal,
+                                MachineMemOperand &MMO);
+
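A caller would use it along these lines (all registers and the MMO are placeholders created by the caller; OldVal, Cmp, and New share a type, and Success is s1):

    // OldVal, Success = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, Cmp, New, MMO
    MIRBuilder.buildAtomicCmpXchgWithSuccess(OldVal, Success, Addr, Cmp, New,
                                             *MMO);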
+  /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
   /// MMO`.
   ///
   /// Atomically replace the value at \p Addr with \p NewVal if it is currently
@@ -802,6 +755,338 @@
   MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
                                          unsigned CmpVal, unsigned NewVal,
                                          MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
+  ///
+  /// Atomically read-modify-update the value at \p Addr with \p Val. Puts the
+  /// original value from \p Addr in \p OldValRes. The modification is
+  /// determined by the opcode.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes,
+                                     unsigned Addr, unsigned Val,
+                                     MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with \p Val. Puts the original
+  /// value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the addition of \p Val and
+  /// the original value. Puts the original value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the subtraction of \p Val and
+  /// the original value. Puts the original value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the bitwise and of \p Val and
+  /// the original value. Puts the original value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the bitwise nand of \p Val
+  /// and the original value. Puts the original value from \p Addr in \p
+  /// OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the bitwise or of \p Val and
+  /// the original value. Puts the original value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWOr(unsigned OldValRes, unsigned Addr,
+                                       unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the bitwise xor of \p Val and
+  /// the original value. Puts the original value from \p Addr in \p OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
+                                        unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the signed maximum of \p
+  /// Val and the original value. Puts the original value from \p Addr in \p
+  /// OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
+                                        unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the signed minimum of \p
+  /// Val and the original value. Puts the original value from \p Addr in \p
+  /// OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
+                                        unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the unsigned maximum of \p
+  /// Val and the original value. Puts the original value from \p Addr in \p
+  /// OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO`.
+  ///
+  /// Atomically replace the value at \p Addr with the unsigned minimum of \p
+  /// Val and the original value. Puts the original value from \p Addr in \p
+  /// OldValRes.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p OldValRes must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+  ///      same type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
+                                         unsigned Val, MachineMemOperand &MMO);
+
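All the buildAtomicRMW* variants share one shape; with placeholder registers and MMO, the named and generic forms below build the same instruction:

    // OldVal = G_ATOMICRMW_ADD Addr, Val, MMO
    MIRBuilder.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);
    // Equivalent, via the generic opcode-taking entry point:
    MIRBuilder.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldVal, Addr, Val,
                              *MMO);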
+  /// Build and insert \p Res = G_BLOCK_ADDR \p BA
+  ///
+  /// G_BLOCK_ADDR computes the address of a basic block.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register of a pointer type.
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA);
+};
+
+/// A CRTP class that contains methods for building instructions that can
+/// be constant folded. MachineIRBuilders that want to inherit from this will
+/// need to implement buildBinaryOp (for constant folding binary ops).
+/// Alternatively, they can implement buildInstr(Opc, Dst, Uses...) to perform
+/// additional folding for Opc.
+template <typename Base>
+class FoldableInstructionsBuilder : public MachineIRBuilderBase {
+  Base &base() { return static_cast<Base &>(*this); }
+
+public:
+  using MachineIRBuilderBase::MachineIRBuilderBase;
+  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
+  ///
+  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
+  /// truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAdd(unsigned Dst, unsigned Src0, unsigned Src1) {
+    return base().buildBinaryOp(TargetOpcode::G_ADD, Dst, Src0, Src1);
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = base().getDestFromArg(Ty);
+    return base().buildAdd(Res, (base().getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
+  ///
+  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildSub(unsigned Dst, unsigned Src0, unsigned Src1) {
+    return base().buildBinaryOp(TargetOpcode::G_SUB, Dst, Src0, Src1);
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildSub(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = base().getDestFromArg(Ty);
+    return base().buildSub(Res, (base().getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
+  ///
+  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and
+  /// \p Op1, truncated to their width.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildMul(unsigned Dst, unsigned Src0, unsigned Src1) {
+    return base().buildBinaryOp(TargetOpcode::G_MUL, Dst, Src0, Src1);
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildMul(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = base().getDestFromArg(Ty);
+    return base().buildMul(Res, (base().getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_AND \p Op0, \p Op1
+  ///
+  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildAnd(unsigned Dst, unsigned Src0, unsigned Src1) {
+    return base().buildBinaryOp(TargetOpcode::G_AND, Dst, Src0, Src1);
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAnd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = base().getDestFromArg(Ty);
+    return base().buildAnd(Res, (base().getRegFromArg(UseArgs))...);
+  }
+
+  /// Build and insert \p Res = G_OR \p Op0, \p Op1
+  ///
+  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+  /// Op1.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+  ///      with the same (scalar or vector) type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildOr(unsigned Dst, unsigned Src0, unsigned Src1) {
+    return base().buildBinaryOp(TargetOpcode::G_OR, Dst, Src0, Src1);
+  }
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildOr(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+    unsigned Res = base().getDestFromArg(Ty);
+    return base().buildOr(Res, (base().getRegFromArg(UseArgs))...);
+  }
+};
+
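To illustrate the CRTP contract, a derived builder only has to supply buildBinaryOp; a minimal sketch (the class name is hypothetical and this body does no folding):

    class MyFoldingBuilder
        : public FoldableInstructionsBuilder<MyFoldingBuilder> {
    public:
      using FoldableInstructionsBuilder<
          MyFoldingBuilder>::FoldableInstructionsBuilder;
      // Hook that buildAdd/buildSub/etc. dispatch to; a real subclass
      // would try to constant fold Src0/Src1 before emitting anything.
      MachineInstrBuilder buildBinaryOp(unsigned Opc, unsigned Dst,
                                        unsigned Src0, unsigned Src1) {
        return buildInstr(Opc).addDef(Dst).addUse(Src0).addUse(Src1);
      }
    };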
+class MachineIRBuilder : public FoldableInstructionsBuilder<MachineIRBuilder> {
+public:
+  using FoldableInstructionsBuilder<
+      MachineIRBuilder>::FoldableInstructionsBuilder;
+  MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Dst,
+                                    unsigned Src0, unsigned Src1) {
+    validateBinaryOp(Dst, Src0, Src1);
+    return buildInstr(Opcode).addDef(Dst).addUse(Src0).addUse(Src1);
+  }
+  using FoldableInstructionsBuilder<MachineIRBuilder>::buildInstr;
+  /// DAG-like generic method for building arbitrary instructions as above.
+  /// \p Opc Opcode for the instruction.
+  /// \p Ty Either LLT/TargetRegisterClass/unsigned type for the Dst.
+  /// \p Args Variadic list of uses of types (unsigned/MachineInstrBuilder).
+  /// Uses of type MachineInstrBuilder will perform
+  /// getOperand(0).getReg() to convert to register.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+                                 UseArgsTy &&... Args) {
+    auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+    addUsesFromArgs(MIB, std::forward<UseArgsTy>(Args)...);
+    return MIB;
+  }
 };
 
 } // End namespace llvm.
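With the variadic overload now living on the derived class, uses can be raw registers or prior MachineInstrBuilders; a sketch (Builder, Src0, and Src1 are placeholders):

    LLT S32 = LLT::scalar(32);
    // Passing an LLT as Dst creates a fresh generic vreg for the def.
    auto Sum = Builder.buildInstr(TargetOpcode::G_ADD, S32, Src0, Src1);
    // A MachineInstrBuilder use is unwrapped via getOperand(0).getReg().
    auto Twice = Builder.buildInstr(TargetOpcode::G_ADD, S32, Sum, Sum);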
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
index 5d75842..d5612e1 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBank.h
@@ -42,7 +42,7 @@
 
 public:
   RegisterBank(unsigned ID, const char *Name, unsigned Size,
-               const uint32_t *ContainedRegClasses, unsigned NumRegClasses);
+               const uint32_t *CoveredClasses, unsigned NumRegClasses);
 
   /// Get the identifier of this register bank.
   unsigned getID() const { return ID; }
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
index 837035f..51e3a27 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -19,6 +19,7 @@
 
 namespace llvm {
 
+class AnalysisUsage;
 class MachineFunction;
 class MachineInstr;
 class MachineOperand;
@@ -102,5 +103,10 @@
 
 /// Returns an APFloat from Val converted to the appropriate size.
 APFloat getAPFloatFromSize(double Val, unsigned Size);
+
+/// Modify analysis usage so it preserves passes required for the SelectionDAG
+/// fallback.
+void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
+
 } // End namespace llvm.
 #endif
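The Localizer::getAnalysisUsage override declared above would plausibly be implemented in terms of this helper, roughly as below (a sketch of the .cpp side, which is not part of this diff):

    void Localizer::getAnalysisUsage(AnalysisUsage &AU) const {
      getSelectionDAGFallbackAnalysisUsage(AU);
      MachineFunctionPass::getAnalysisUsage(AU);
    }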