Update prebuilt Clang to r416183b from Android.

https://android.googlesource.com/platform/prebuilts/clang/host/
linux-x86/+/06a71ddac05c22edb2d10b590e1769b3f8619bef

clang 12.0.5 (based on r416183b) from build 7284624.

Change-Id: I277a316abcf47307562d8b748b84870f31a72866
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
index 5a44e67..f76dec5 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -16,14 +16,12 @@
 #include "llvm/CodeGen/CSEConfigBase.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
-#include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Pass.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/CodeGen.h"
 
 namespace llvm {
+class MachineBasicBlock;
 
 /// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
 /// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
@@ -120,6 +118,8 @@
 
   void setMF(MachineFunction &MF);
 
+  Error verify();
+
   /// Records a newly created inst in a list and lazily insert it to the CSEMap.
   /// Sometimes, this method might be called with a partially constructed
   /// MachineInstr,
@@ -173,14 +173,16 @@
       : ID(ID), MRI(MRI) {}
   // Profiling methods.
   const GISelInstProfileBuilder &addNodeIDOpcode(unsigned Opc) const;
-  const GISelInstProfileBuilder &addNodeIDRegType(const LLT &Ty) const;
-  const GISelInstProfileBuilder &addNodeIDRegType(const unsigned) const;
+  const GISelInstProfileBuilder &addNodeIDRegType(const LLT Ty) const;
+  const GISelInstProfileBuilder &addNodeIDRegType(const Register) const;
 
   const GISelInstProfileBuilder &
   addNodeIDRegType(const TargetRegisterClass *RC) const;
   const GISelInstProfileBuilder &addNodeIDRegType(const RegisterBank *RB) const;
 
-  const GISelInstProfileBuilder &addNodeIDRegNum(unsigned Reg) const;
+  const GISelInstProfileBuilder &addNodeIDRegNum(Register Reg) const;
+
+  const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
 
   const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
   const GISelInstProfileBuilder &
@@ -220,9 +222,7 @@
 
 public:
   static char ID;
-  GISelCSEAnalysisWrapperPass() : MachineFunctionPass(ID) {
-    initializeGISelCSEAnalysisWrapperPassPass(*PassRegistry::getPassRegistry());
-  }
+  GISelCSEAnalysisWrapperPass();
 
   void getAnalysisUsage(AnalysisUsage &AU) const override;
 
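For illustration (not part of this change), a minimal sketch of a CSE-map lookup using the updated profiling interface above, which now takes LLT by value and Register instead of a raw unsigned. The wrapper function is a placeholder; it assumes a GISelCSEInfo and MachineRegisterInfo are available:

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
    #include "llvm/CodeGen/TargetOpcode.h"

    using namespace llvm;

    // Look up an existing G_ADD of (LHS, RHS) in the CSE map, if any.
    static MachineInstr *lookupExistingAdd(GISelCSEInfo &CSEInfo,
                                           const MachineRegisterInfo &MRI,
                                           Register LHS, Register RHS, LLT Ty,
                                           MachineBasicBlock *MBB,
                                           void *&InsertPos) {
      FoldingSetNodeID ID;
      GISelInstProfileBuilder ProfBuilder(ID, MRI);
      ProfBuilder.addNodeIDOpcode(TargetOpcode::G_ADD)
          .addNodeIDRegType(Ty)  // LLT is now taken by value
          .addNodeIDRegNum(LHS)  // operands are Register, not unsigned
          .addNodeIDRegNum(RHS);
      return CSEInfo.getMachineInstrIfExists(ID, MBB, InsertPos);
    }
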
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
index d8d15bd..57ff390 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -17,9 +17,11 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/TargetCallingConv.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Attributes.h"
 #include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Type.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MachineValueType.h"
 #include <cstdint>
@@ -27,14 +29,14 @@
 
 namespace llvm {
 
+class CallBase;
 class DataLayout;
 class Function;
+class FunctionLoweringInfo;
 class MachineIRBuilder;
-class MachineOperand;
 struct MachinePointerInfo;
 class MachineRegisterInfo;
 class TargetLowering;
-class Type;
 class Value;
 
 class CallLowering {
@@ -42,38 +44,104 @@
 
   virtual void anchor();
 public:
-  struct ArgInfo {
-    SmallVector<Register, 4> Regs;
+  struct BaseArgInfo {
     Type *Ty;
-    ISD::ArgFlagsTy Flags;
+    SmallVector<ISD::ArgFlagsTy, 4> Flags;
     bool IsFixed;
 
+    BaseArgInfo(Type *Ty,
+                ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+                bool IsFixed = true)
+        : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
+
+    BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
+  };
+
+  struct ArgInfo : public BaseArgInfo {
+    SmallVector<Register, 4> Regs;
+    // If the argument had to be split into multiple parts according to the
+    // target calling convention, then this contains the original vregs
+    // if the argument was an incoming arg.
+    SmallVector<Register, 2> OrigRegs;
+
     ArgInfo(ArrayRef<Register> Regs, Type *Ty,
-            ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{}, bool IsFixed = true)
-        : Regs(Regs.begin(), Regs.end()), Ty(Ty), Flags(Flags),
-          IsFixed(IsFixed) {
+            ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+            bool IsFixed = true)
+        : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
+      if (!Regs.empty() && Flags.empty())
+        this->Flags.push_back(ISD::ArgFlagsTy());
       // FIXME: We should have just one way of saying "no register".
-      assert((Ty->isVoidTy() == (Regs.empty() || Regs[0] == 0)) &&
+      assert(((Ty->isVoidTy() || Ty->isEmptyTy()) ==
+              (Regs.empty() || Regs[0] == 0)) &&
              "only void types should have no register");
     }
+
+    ArgInfo() : BaseArgInfo() {}
+  };
+
+  struct CallLoweringInfo {
+    /// Calling convention to be used for the call.
+    CallingConv::ID CallConv = CallingConv::C;
+
+    /// Destination of the call. It should be either a register, globaladdress,
+    /// or externalsymbol.
+    MachineOperand Callee = MachineOperand::CreateImm(0);
+
+    /// Descriptor for the return type of the function.
+    ArgInfo OrigRet;
+
+    /// List of descriptors of the arguments passed to the function.
+    SmallVector<ArgInfo, 8> OrigArgs;
+
+    /// Valid if the call has a swifterror inout parameter, and contains the
+    /// vreg that the swifterror should be copied into after the call.
+    Register SwiftErrorVReg;
+
+    MDNode *KnownCallees = nullptr;
+
+    /// True if the call must be tail call optimized.
+    bool IsMustTailCall = false;
+
+    /// True if the call passes all target-independent checks for tail call
+    /// optimization.
+    bool IsTailCall = false;
+
+    /// True if the call was lowered as a tail call. This is consumed by the
+    /// legalizer. This allows the legalizer to lower libcalls as tail calls.
+    bool LoweredTailCall = false;
+
+    /// True if the call is to a vararg function.
+    bool IsVarArg = false;
+
+    /// True if the function's return value can be lowered to registers.
+    bool CanLowerReturn = true;
+
+    /// VReg to hold the hidden sret parameter.
+    Register DemoteRegister;
+
+    /// The stack index for sret demotion.
+    int DemoteStackIndex;
   };
 
   /// Argument handling is mostly uniform between the four places that
   /// make these decisions: function formal arguments, call
   /// instruction args, call instruction returns and function
   /// returns. However, once a decision has been made on where an
-  /// arugment should go, exactly what happens can vary slightly. This
+  /// argument should go, exactly what happens can vary slightly. This
   /// class abstracts the differences.
   struct ValueHandler {
-    ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
-                 CCAssignFn *AssignFn)
-      : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn) {}
+    ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
+                 MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
+        : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
+          IsIncomingArgumentHandler(IsIncoming) {}
 
     virtual ~ValueHandler() = default;
 
-    /// Returns true if the handler is dealing with formal arguments,
-    /// not with return values etc.
-    virtual bool isArgumentHandler() const { return false; }
+    /// Returns true if the handler is dealing with incoming arguments,
+    /// i.e. those that move values from some physical location to vregs.
+    bool isIncomingArgumentHandler() const {
+      return IsIncomingArgumentHandler;
+    }
 
     /// Materialize a VReg containing the address of the specified
     /// stack-based object. This is either based on a FrameIndex or
@@ -96,6 +164,15 @@
                                       uint64_t Size, MachinePointerInfo &MPO,
                                       CCValAssign &VA) = 0;
 
+    /// An overload which takes an ArgInfo if additional information about
+    /// the arg is needed.
+    virtual void assignValueToAddress(const ArgInfo &Arg, Register Addr,
+                                      uint64_t Size, MachinePointerInfo &MPO,
+                                      CCValAssign &VA) {
+      assert(Arg.Regs.size() == 1);
+      assignValueToAddress(Arg.Regs[0], Addr, Size, MPO, VA);
+    }
+
     /// Handle custom values, which may be passed into one or more of \p VAs.
     /// \return The number of \p VAs that have been assigned after the first
     ///         one, and which should therefore be skipped from further
@@ -107,12 +184,15 @@
       llvm_unreachable("Custom values not supported");
     }
 
-    Register extendRegister(Register ValReg, CCValAssign &VA);
+    /// Extend a register to the location type given in VA, capped at extending
+    /// to at most MaxSizeBits bits. If MaxSizeBits is 0 then no maximum is set.
+    Register extendRegister(Register ValReg, CCValAssign &VA,
+                            unsigned MaxSizeBits = 0);
 
     virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
-                           CCState &State) {
-      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+                           ISD::ArgFlagsTy Flags, CCState &State) {
+      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
     }
 
     MachineIRBuilder &MIRBuilder;
@@ -120,9 +200,22 @@
     CCAssignFn *AssignFn;
 
   private:
+    bool IsIncomingArgumentHandler;
     virtual void anchor();
   };
 
+  struct IncomingValueHandler : public ValueHandler {
+    IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                         CCAssignFn *AssignFn)
+        : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
+  };
+
+  struct OutgoingValueHandler : public ValueHandler {
+    OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+                         CCAssignFn *AssignFn)
+        : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
+  };
+
 protected:
   /// Getter for generic TargetLowering class.
   const TargetLowering *getTLI() const {
@@ -135,6 +228,17 @@
     return static_cast<const XXXTargetLowering *>(TLI);
   }
 
+  /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
+  /// parameter of \p Call.
+  ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
+                                         unsigned ArgIdx) const;
+
+  /// Adds flags to \p Flags based off of the attributes in \p Attrs.
+  /// \p OpIdx is the index in \p Attrs to add flags from.
+  void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
+                                 const AttributeList &Attrs,
+                                 unsigned OpIdx) const;
+
   template <typename FuncInfoTy>
   void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                    const FuncInfoTy &FuncInfo) const;
@@ -158,11 +262,44 @@
                   MachineIRBuilder &MIRBuilder) const;
 
   /// Invoke Handler::assignArg on each of the given \p Args and then use
-  /// \p Callback to move them to the assigned locations.
+  /// \p Handler to move them to the assigned locations.
   ///
   /// \return True if everything has succeeded, false otherwise.
-  bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
+  bool handleAssignments(MachineIRBuilder &MIRBuilder,
+                         SmallVectorImpl<ArgInfo> &Args,
                          ValueHandler &Handler) const;
+  bool handleAssignments(CCState &CCState,
+                         SmallVectorImpl<CCValAssign> &ArgLocs,
+                         MachineIRBuilder &MIRBuilder,
+                         SmallVectorImpl<ArgInfo> &Args,
+                         ValueHandler &Handler) const;
+
+  /// Analyze passed or returned values from a call, supplied in \p Args,
+  /// incorporating info about the passed values into \p CCState.
+  ///
+  /// Used to check if arguments are suitable for tail call lowering.
+  bool analyzeArgInfo(CCState &CCState, SmallVectorImpl<ArgInfo> &Args,
+                      CCAssignFn &AssignFnFixed,
+                      CCAssignFn &AssignFnVarArg) const;
+
+  /// \returns True if the callee's and caller's calling conventions pass
+  /// results in the same way. Typically used for tail call eligibility checks.
+  ///
+  /// \p Info is the CallLoweringInfo for the call.
+  /// \p MF is the MachineFunction for the caller.
+  /// \p InArgs contains the results of the call.
+  /// \p CalleeAssignFnFixed is the CCAssignFn to be used for the callee for
+  /// fixed arguments.
+  /// \p CalleeAssignFnVarArg is similar, but for varargs.
+  /// \p CallerAssignFnFixed is the CCAssignFn to be used for the caller for
+  /// fixed arguments.
+  /// \p CallerAssignFnVarArg is similar, but for varargs.
+  bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
+                         SmallVectorImpl<ArgInfo> &InArgs,
+                         CCAssignFn &CalleeAssignFnFixed,
+                         CCAssignFn &CalleeAssignFnVarArg,
+                         CCAssignFn &CallerAssignFnFixed,
+                         CCAssignFn &CallerAssignFnVarArg) const;
 
 public:
   CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
@@ -175,20 +312,73 @@
     return false;
   }
 
+  /// Load the returned value from the stack into virtual registers in \p VRegs.
+  /// It uses the frame index \p FI and the start offset from \p DemoteReg.
+  /// The loaded data size will be determined from \p RetTy.
+  void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
+                       ArrayRef<Register> VRegs, Register DemoteReg,
+                       int FI) const;
+
+  /// Store the return value given by \p VRegs into stack starting at the offset
+  /// specified in \p DemoteReg.
+  void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
+                        ArrayRef<Register> VRegs, Register DemoteReg) const;
+
+  /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
+  /// This function should be called from the target specific
+  /// lowerFormalArguments when \p F requires the sret demotion.
+  void insertSRetIncomingArgument(const Function &F,
+                                  SmallVectorImpl<ArgInfo> &SplitArgs,
+                                  Register &DemoteReg, MachineRegisterInfo &MRI,
+                                  const DataLayout &DL) const;
+
+  /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
+  /// the OrigArgs field of \p Info.
+  void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
+                                  const CallBase &CB,
+                                  CallLoweringInfo &Info) const;
+
+  /// \return True if the return type described by \p Outs can be returned
+  /// without performing sret demotion.
+  bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
+                   CCAssignFn *Fn) const;
+
+  /// Get the type and the ArgFlags for the split components of \p RetTy as
+  /// returned by \c ComputeValueVTs.
+  void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
+                     SmallVectorImpl<BaseArgInfo> &Outs,
+                     const DataLayout &DL) const;
+
+  /// Toplevel function to check the return type based on the target calling
+  /// convention. \return True if the return value of \p MF can be returned
+  /// without performing sret demotion.
+  bool checkReturnTypeForCallConv(MachineFunction &MF) const;
+
+  /// This hook must be implemented to check whether the return values
+  /// described by \p Outs can fit into the return registers. If false
+  /// is returned, an sret-demotion is performed.
+  virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+                              SmallVectorImpl<BaseArgInfo> &Outs,
+                              bool IsVarArg) const {
+    return true;
+  }
+
   /// This hook must be implemented to lower outgoing return values, described
   /// by \p Val, into the specified virtual registers \p VRegs.
   /// This hook is used by GlobalISel.
   ///
+  /// \p FLI is required for sret demotion.
+  ///
   /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
   /// that needs to be implicitly returned.
   ///
   /// \return True if the lowering succeeds, false otherwise.
   virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
-                           ArrayRef<Register> VRegs,
+                           ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
                            Register SwiftErrorVReg) const {
     if (!supportSwiftError()) {
       assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
-      return lowerReturn(MIRBuilder, Val, VRegs);
+      return lowerReturn(MIRBuilder, Val, VRegs, FLI);
     }
     return false;
   }
@@ -196,10 +386,13 @@
   /// This hook behaves as the extended lowerReturn function, but for targets
   /// that do not support swifterror value promotion.
   virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
-                           ArrayRef<Register> VRegs) const {
+                           ArrayRef<Register> VRegs,
+                           FunctionLoweringInfo &FLI) const {
     return false;
   }
 
+  virtual bool fallBackToDAGISel(const Function &F) const { return false; }
+
   /// This hook must be implemented to lower the incoming (formal)
   /// arguments, described by \p VRegs, for GlobalISel. Each argument
   /// must end up in the related virtual registers described by \p VRegs.
@@ -207,49 +400,23 @@
   /// the second in \c VRegs[1], and so on. For each argument, there will be one
   /// register for each non-aggregate type, as returned by \c computeValueLLTs.
   /// \p MIRBuilder is set to the proper insertion for the argument
-  /// lowering.
+  /// lowering. \p FLI is required for sret demotion.
   ///
   /// \return True if the lowering succeeded, false otherwise.
   virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                     const Function &F,
-                                    ArrayRef<ArrayRef<Register>> VRegs) const {
+                                    ArrayRef<ArrayRef<Register>> VRegs,
+                                    FunctionLoweringInfo &FLI) const {
     return false;
   }
 
   /// This hook must be implemented to lower the given call instruction,
   /// including argument and return value marshalling.
   ///
-  /// \p CallConv is the calling convention to be used for the call.
-  ///
-  /// \p Callee is the destination of the call. It should be either a register,
-  /// globaladdress, or externalsymbol.
-  ///
-  /// \p OrigRet is a descriptor for the return type of the function.
-  ///
-  /// \p OrigArgs is a list of descriptors of the arguments passed to the
-  /// function.
-  ///
-  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
-  /// parameter, and contains the vreg that the swifterror should be copied into
-  /// after the call.
   ///
   /// \return true if the lowering succeeded, false otherwise.
-  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                         const MachineOperand &Callee, const ArgInfo &OrigRet,
-                         ArrayRef<ArgInfo> OrigArgs,
-                         Register SwiftErrorVReg) const {
-    if (!supportSwiftError()) {
-      assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
-      return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
-    }
-    return false;
-  }
-
-  /// This hook behaves as the extended lowerCall function, but for targets that
-  /// do not support swifterror value promotion.
-  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                         const MachineOperand &Callee, const ArgInfo &OrigRet,
-                         ArrayRef<ArgInfo> OrigArgs) const {
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
+                         CallLoweringInfo &Info) const {
     return false;
   }
 
@@ -277,7 +444,7 @@
   /// range of an immediate jump.
   ///
   /// \return true if the lowering succeeded, false otherwise.
-  bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
+  bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                  ArrayRef<Register> ResRegs,
                  ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                  std::function<unsigned()> GetCalleeReg) const;
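
For illustration (not part of this change), a minimal sketch of populating the new CallLoweringInfo struct, which replaces lowerCall's long parameter list. The wrapper function and the "__helper" symbol are placeholders:

    #include "llvm/CodeGen/GlobalISel/CallLowering.h"

    using namespace llvm;

    static bool emitHelperCall(CallLowering &CL, MachineIRBuilder &MIRBuilder,
                               Type *RetTy, Register RetReg,
                               Type *ArgTy, Register ArgReg) {
      CallLowering::CallLoweringInfo Info;
      Info.CallConv = CallingConv::C;
      // Callee may be a register, global address, or external symbol.
      Info.Callee = MachineOperand::CreateES("__helper"); // placeholder
      // For a void return, pass an empty register list instead.
      Info.OrigRet = CallLowering::ArgInfo({RetReg}, RetTy);
      Info.OrigArgs.push_back(CallLowering::ArgInfo({ArgReg}, ArgTy));
      Info.IsTailCall = false;
      return CL.lowerCall(MIRBuilder, Info);
    }
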
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
index 12a1f97..efe8bdf 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Combiner.h
@@ -1,4 +1,4 @@
-//== ----- llvm/CodeGen/GlobalISel/Combiner.h --------------------- == //
+//== ----- llvm/CodeGen/GlobalISel/Combiner.h -------------------*- C++ -*-== //
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 0c50c9c..0d240e9 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -17,16 +17,24 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
 #define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
 
+#include "llvm/ADT/APFloat.h"
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/Support/Alignment.h"
 
 namespace llvm {
 
 class GISelChangeObserver;
 class MachineIRBuilder;
+class MachineInstrBuilder;
 class MachineRegisterInfo;
 class MachineInstr;
 class MachineOperand;
+class GISelKnownBits;
+class MachineDominatorTree;
+class LegalizerInfo;
+struct LegalityQuery;
+class TargetLowering;
 
 struct PreferredTuple {
   LLT Ty;                // The result type of the extend.
@@ -34,13 +42,73 @@
   MachineInstr *MI;
 };
 
+struct IndexedLoadStoreMatchInfo {
+  Register Addr;
+  Register Base;
+  Register Offset;
+  bool IsPre;
+};
+
+struct PtrAddChain {
+  int64_t Imm;
+  Register Base;
+};
+
+struct RegisterImmPair {
+  Register Reg;
+  int64_t Imm;
+};
+
+struct ShiftOfShiftedLogic {
+  MachineInstr *Logic;
+  MachineInstr *Shift2;
+  Register LogicNonShiftReg;
+  uint64_t ValSum;
+};
+
+using OperandBuildSteps =
+    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
+struct InstructionBuildSteps {
+  unsigned Opcode = 0;          /// The opcode for the produced instruction.
+  OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
+  InstructionBuildSteps() = default;
+  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
+      : Opcode(Opcode), OperandFns(OperandFns) {}
+};
+
+struct InstructionStepsMatchInfo {
+  /// Describes instructions to be built during a combine.
+  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
+  InstructionStepsMatchInfo() = default;
+  InstructionStepsMatchInfo(
+      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
+      : InstrsToBuild(InstrsToBuild) {}
+};
+
 class CombinerHelper {
+protected:
   MachineIRBuilder &Builder;
   MachineRegisterInfo &MRI;
   GISelChangeObserver &Observer;
+  GISelKnownBits *KB;
+  MachineDominatorTree *MDT;
+  const LegalizerInfo *LI;
 
 public:
-  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B);
+  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
+                 GISelKnownBits *KB = nullptr,
+                 MachineDominatorTree *MDT = nullptr,
+                 const LegalizerInfo *LI = nullptr);
+
+  GISelKnownBits *getKnownBits() const {
+    return KB;
+  }
+
+  const TargetLowering &getTargetLowering() const;
+
+  /// \return true if the combine is running prior to legalization, or if \p
+  /// Query is legal on the target.
+  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
 
   /// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
   void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
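
For illustration (not part of this change), a sketch of how a combiner might thread the new optional analyses through; MyCombinerInfo and its KB/MDT/LInfo members are hypothetical:

    bool MyCombinerInfo::combine(GISelChangeObserver &Observer,
                                 MachineInstr &MI, MachineIRBuilder &B) const {
      // KB, MDT and LInfo may all be null; CombinerHelper degrades gracefully,
      // e.g. dominates() falls back to single-block reasoning without a
      // dominator tree.
      CombinerHelper Helper(Observer, B, KB, MDT, LInfo);
      return Helper.tryCombine(MI);
    }
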
@@ -56,18 +124,381 @@
   bool matchCombineCopy(MachineInstr &MI);
   void applyCombineCopy(MachineInstr &MI);
 
+  /// Returns true if \p DefMI precedes \p UseMI or they are the same
+  /// instruction. Both must be in the same basic block.
+  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);
+
+  /// Returns true if \p DefMI dominates \p UseMI. By definition an
+  /// instruction dominates itself.
+  ///
+  /// If we haven't been provided with a MachineDominatorTree during
+  /// construction, this function returns a conservative result that tracks just
+  /// a single basic block.
+  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);
+
   /// If \p MI is extend that consumes the result of a load, try to combine it.
   /// Returns true if MI changed.
   bool tryCombineExtendingLoads(MachineInstr &MI);
   bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
   void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
 
-  bool matchCombineBr(MachineInstr &MI);
-  bool tryCombineBr(MachineInstr &MI);
+  /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
+  /// legal and the surrounding code makes it useful.
+  ///
+  /// For example (pre-indexed):
+  ///
+  ///     $addr = G_PTR_ADD $base, $offset
+  ///     [...]
+  ///     $val = G_LOAD $addr
+  ///     [...]
+  ///     $whatever = COPY $addr
+  ///
+  /// -->
+  ///
+  ///     $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
+  ///     [...]
+  ///     $whatever = COPY $addr
+  ///
+  /// or (post-indexed):
+  ///
+  ///     G_STORE $val, $base
+  ///     [...]
+  ///     $addr = G_PTR_ADD $base, $offset
+  ///     [...]
+  ///     $whatever = COPY $addr
+  ///
+  /// -->
+  ///
+  ///     $addr = G_INDEXED_STORE $val, $base, $offset
+  ///     [...]
+  ///     $whatever = COPY $addr
+  bool tryCombineIndexedLoadStore(MachineInstr &MI);
+  bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
+  void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
+
+  bool matchSextTruncSextLoad(MachineInstr &MI);
+  bool applySextTruncSextLoad(MachineInstr &MI);
+
+  /// Match sext_inreg(load p), imm -> sextload p
+  bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+  bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+
+  /// If a brcond's true block is not the fallthrough, make it so by inverting
+  /// the condition and swapping operands.
+  bool matchOptBrCondByInvertingCond(MachineInstr &MI);
+  void applyOptBrCondByInvertingCond(MachineInstr &MI);
+
+  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
+  /// Returns true if MI changed.
+  /// Right now, we support:
+  /// - concat_vector(undef, undef) => undef
+  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
+  ///   build_vector(A, B, C, D)
+  ///
+  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
+  bool tryCombineConcatVectors(MachineInstr &MI);
+  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
+  /// can be flattened into a build_vector.
+  /// In the first case \p IsUndef will be true.
+  /// In the second case \p Ops will contain the operands needed
+  /// to produce the flattened build_vector.
+  ///
+  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
+  bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
+                                 SmallVectorImpl<Register> &Ops);
+  /// Replace \p MI with a flattened build_vector with \p Ops or an
+  /// implicit_def if IsUndef is true.
+  void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
+                                 const ArrayRef<Register> Ops);
+
+  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
+  /// Returns true if MI changed.
+  ///
+  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
+  bool tryCombineShuffleVector(MachineInstr &MI);
+  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
+  /// concat_vectors.
+  /// \p Ops will contain the operands needed to produce the flattened
+  /// concat_vectors.
+  ///
+  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
+  bool matchCombineShuffleVector(MachineInstr &MI,
+                                 SmallVectorImpl<Register> &Ops);
+  /// Replace \p MI with a concat_vectors with \p Ops.
+  void applyCombineShuffleVector(MachineInstr &MI,
+                                 const ArrayRef<Register> Ops);
+
+  /// Optimize memcpy intrinsics et al, e.g. constant len calls.
+  /// \p MaxLen if non-zero specifies the max length of a mem libcall to inline.
+  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
+
+  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
+  bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
+
+  /// Fold (shift (shift base, x), y) -> (shift base (x+y))
+  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+  bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+
+  /// If we have a shift-by-constant of a bitwise logic op that itself has a
+  /// shift-by-constant operand with identical opcode, we may be able to convert
+  /// that into 2 independent shifts followed by the logic op.
+  bool matchShiftOfShiftedLogic(MachineInstr &MI,
+                                ShiftOfShiftedLogic &MatchInfo);
+  bool applyShiftOfShiftedLogic(MachineInstr &MI,
+                                ShiftOfShiftedLogic &MatchInfo);
+
+  /// Transform a multiply by a power-of-2 value to a left shift.
+  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
+  bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
+
+  // Transform a G_SHL with an extended source into a narrower shift if
+  // possible.
+  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
+  bool applyCombineShlOfExtend(MachineInstr &MI,
+                               const RegisterImmPair &MatchData);
+
+  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
+  /// type. This will not produce a shift smaller than \p TargetShiftSize.
+  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
+                                 unsigned &ShiftVal);
+  bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
+  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
+
+  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
+  bool
+  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+                                        SmallVectorImpl<Register> &Operands);
+  bool
+  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+                                        SmallVectorImpl<Register> &Operands);
+
+  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
+  bool matchCombineUnmergeConstant(MachineInstr &MI,
+                                   SmallVectorImpl<APInt> &Csts);
+  bool applyCombineUnmergeConstant(MachineInstr &MI,
+                                   SmallVectorImpl<APInt> &Csts);
+
+  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
+  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+  bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+
+  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
+  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
+  bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
+
+  /// Transform fp_instr(cst) to constant result of the fp operation.
+  bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
+                                       Optional<APFloat> &Cst);
+  bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
+                                       Optional<APFloat> &Cst);
+
+  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
+  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+  bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+
+  /// Transform PtrToInt(IntToPtr(x)) to x.
+  bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+  bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+
+  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
+  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
+  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
+                                  std::pair<Register, bool> &PtrRegAndCommute);
+  bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
+                                  std::pair<Register, bool> &PtrRegAndCommute);
+
+  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
+  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+  bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+
+  /// Transform anyext(trunc(x)) to x.
+  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+  bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+
+  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
+  bool matchCombineExtOfExt(MachineInstr &MI,
+                            std::tuple<Register, unsigned> &MatchInfo);
+  bool applyCombineExtOfExt(MachineInstr &MI,
+                            std::tuple<Register, unsigned> &MatchInfo);
+
+  /// Transform fneg(fneg(x)) to x.
+  bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
+
+  /// Match fabs(fabs(x)) to fabs(x).
+  bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+  bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+
+  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
+  bool matchCombineTruncOfExt(MachineInstr &MI,
+                              std::pair<Register, unsigned> &MatchInfo);
+  bool applyCombineTruncOfExt(MachineInstr &MI,
+                              std::pair<Register, unsigned> &MatchInfo);
+
+  /// Transform trunc (shl x, K) to shl (trunc x),
+  /// K => K < VT.getScalarSizeInBits().
+  bool matchCombineTruncOfShl(MachineInstr &MI,
+                              std::pair<Register, Register> &MatchInfo);
+  bool applyCombineTruncOfShl(MachineInstr &MI,
+                              std::pair<Register, Register> &MatchInfo);
+
+  /// Transform G_MUL(x, -1) to G_SUB(0, x)
+  bool applyCombineMulByNegativeOne(MachineInstr &MI);
+
+  /// Return true if any explicit use operand on \p MI is defined by a
+  /// G_IMPLICIT_DEF.
+  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);
+
+  /// Return true if all register explicit use operands on \p MI are defined by
+  /// a G_IMPLICIT_DEF.
+  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);
+
+  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
+  bool matchUndefShuffleVectorMask(MachineInstr &MI);
+
+  /// Return true if a G_STORE instruction \p MI is storing an undef value.
+  bool matchUndefStore(MachineInstr &MI);
+
+  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
+  bool matchUndefSelectCmp(MachineInstr &MI);
+
+  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
+  /// true, \p OpIdx will store the operand index of the known selected value.
+  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
+
+  /// Replace an instruction with a G_FCONSTANT with value \p C.
+  bool replaceInstWithFConstant(MachineInstr &MI, double C);
+
+  /// Replace an instruction with a G_CONSTANT with value \p C.
+  bool replaceInstWithConstant(MachineInstr &MI, int64_t C);
+
+  /// Replace an instruction with a G_IMPLICIT_DEF.
+  bool replaceInstWithUndef(MachineInstr &MI);
+
+  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
+  bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
+
+  /// Delete \p MI and replace all of its uses with \p Replacement.
+  bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
+
+  /// Return true if \p MOP1 and \p MOP2 are register operands defined by
+  /// equivalent instructions.
+  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
+
+  /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
+  /// \p C.
+  bool matchConstantOp(const MachineOperand &MOP, int64_t C);
+
+  /// Optimize (cond ? x : x) -> x
+  bool matchSelectSameVal(MachineInstr &MI);
+
+  /// Optimize (x op x) -> x
+  bool matchBinOpSameVal(MachineInstr &MI);
+
+  /// Check if operand \p OpIdx is zero.
+  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
+
+  /// Check if operand \p OpIdx is undef.
+  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
+
+  /// Check if operand \p OpIdx is known to be a power of 2.
+  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
+
+  /// Erase \p MI
+  bool eraseInst(MachineInstr &MI);
+
+  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
+  bool matchSimplifyAddToSub(MachineInstr &MI,
+                             std::tuple<Register, Register> &MatchInfo);
+  bool applySimplifyAddToSub(MachineInstr &MI,
+                             std::tuple<Register, Register> &MatchInfo);
+
+  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+  bool
+  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
+                                       InstructionStepsMatchInfo &MatchInfo);
+
+  /// Replace \p MI with a series of instructions described in \p MatchInfo.
+  bool applyBuildInstructionSteps(MachineInstr &MI,
+                                  InstructionStepsMatchInfo &MatchInfo);
+
+  /// Match ashr (shl x, C), C -> sext_inreg x, (bitwidth - C)
+  bool matchAshrShlToSextInreg(MachineInstr &MI,
+                               std::tuple<Register, int64_t> &MatchInfo);
+  bool applyAshShlToSextInreg(MachineInstr &MI,
+                              std::tuple<Register, int64_t> &MatchInfo);
+  /// \return true if \p MI is a G_AND instruction whose operands are x and y
+  /// where x & y == x or x & y == y. (E.g., one operand is an all-ones value.)
+  ///
+  /// \param [in] MI - The G_AND instruction.
+  /// \param [out] Replacement - A register the G_AND should be replaced with on
+  /// success.
+  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
+
+  /// \return true if \p MI is a G_OR instruction whose operands are x and y
+  /// where x | y == x or x | y == y. (E.g., one operand is an all-zeros
+  /// value.)
+  ///
+  /// \param [in] MI - The G_OR instruction.
+  /// \param [out] Replacement - A register the G_OR should be replaced with on
+  /// success.
+  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+
+  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
+  bool matchRedundantSExtInReg(MachineInstr &MI);
+
+  /// Combine inverting a result of a compare into the opposite cond code.
+  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+  bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+
+  /// Fold (xor (and x, y), y) -> (and (not x), y)
+  ///{
+  bool matchXorOfAndWithSameReg(MachineInstr &MI,
+                                std::pair<Register, Register> &MatchInfo);
+  bool applyXorOfAndWithSameReg(MachineInstr &MI,
+                                std::pair<Register, Register> &MatchInfo);
+  ///}
+
+  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
+  bool matchPtrAddZero(MachineInstr &MI);
+  bool applyPtrAddZero(MachineInstr &MI);
+
+  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
+  bool applySimplifyURemByPow2(MachineInstr &MI);
+
+  bool matchCombineInsertVecElts(MachineInstr &MI,
+                                 SmallVectorImpl<Register> &MatchInfo);
+
+  bool applyCombineInsertVecElts(MachineInstr &MI,
+                                 SmallVectorImpl<Register> &MatchInfo);
 
   /// Try to transform \p MI by using all of the above
   /// combine functions. Returns true if changed.
   bool tryCombine(MachineInstr &MI);
+
+private:
+  // Memcpy family optimization helpers.
+  bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src,
+                      unsigned KnownLen, Align DstAlign, Align SrcAlign,
+                      bool IsVolatile);
+  bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src,
+                       unsigned KnownLen, Align DstAlign, Align SrcAlign,
+                       bool IsVolatile);
+  bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
+                      unsigned KnownLen, Align DstAlign, bool IsVolatile);
+
+  /// Given a non-indexed load or store instruction \p MI, find an offset that
+  /// can be usefully and legally folded into it as a post-indexing operation.
+  ///
+  /// \returns true if a candidate is found.
+  bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
+                              Register &Offset);
+
+  /// Given a non-indexed load or store instruction \p MI, find an offset that
+  /// can be usefully and legally folded into it as a pre-indexing operation.
+  ///
+  /// \returns true if a candidate is found.
+  bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
+                             Register &Offset);
 };
 } // namespace llvm
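
For illustration (not part of this change), the match/apply protocol the helper follows: each matchFoo only inspects the IR and records what it found in a MatchInfo struct, and the paired applyFoo performs the rewrite. A minimal sketch of driving one pair by hand, which is what generated combiners do:

    static bool combinePtrAddChain(CombinerHelper &Helper, MachineInstr &MI) {
      // matchPtrAddImmedChain fills MatchInfo (base register plus folded
      // immediate) without modifying the function.
      PtrAddChain MatchInfo;
      if (!Helper.matchPtrAddImmedChain(MI, MatchInfo))
        return false;
      // The apply step rewrites MI using the recorded information.
      return Helper.applyPtrAddImmedChain(MI, MatchInfo);
    }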
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
index 3b09a8e..e95a5e2 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerInfo.h
@@ -27,9 +27,11 @@
 class CombinerInfo {
 public:
   CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
-               LegalizerInfo *LInfo)
+               const LegalizerInfo *LInfo, bool OptEnabled, bool OptSize,
+               bool MinSize)
       : IllegalOpsAllowed(AllowIllegalOps),
-        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo) {
+        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo),
+        EnableOpt(OptEnabled), EnableOptSize(OptSize), EnableMinSize(MinSize) {
     assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
            "Expecting legalizerInfo when illegalops not allowed");
   }
@@ -43,6 +45,15 @@
   bool LegalizeIllegalOps; // TODO: Make use of this.
   const LegalizerInfo *LInfo;
 
+  /// Whether optimizations should be enabled. This is to distinguish between
+  /// uses of the combiner unconditionally and only when optimizations are
+  /// specifically enabled.
+  bool EnableOpt;
+  /// Whether we're optimizing for size.
+  bool EnableOptSize;
+  /// Whether we're optimizing for minsize (-Oz).
+  bool EnableMinSize;
+
   /// Attempt to combine instructions using MI as the root.
   ///
   /// Use Observer to report the creation, modification, and erasure of
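
For illustration (not part of this change), a sketch of deriving the three new flags when constructing a CombinerInfo subclass; MyCombinerInfo and the SkipOpt parameter are placeholders, mirroring the usual optnone/optsize handling in target combiner passes:

    static MyCombinerInfo makeCombinerInfo(MachineFunction &MF, bool SkipOpt) {
      const Function &F = MF.getFunction();
      bool EnableOpt =
          MF.getTarget().getOptLevel() != CodeGenOpt::None && !SkipOpt;
      return MyCombinerInfo(/*AllowIllegalOps=*/true,
                            /*ShouldLegalizeIllegal=*/false, /*LInfo=*/nullptr,
                            EnableOpt, F.hasOptSize(), F.hasMinSize());
    }
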
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
index e817d9b..df196bf 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/ConstantFoldingMIRBuilder.h
@@ -54,6 +54,17 @@
         return buildConstant(Dst, MaybeCst->getSExtValue());
       break;
     }
+    case TargetOpcode::G_SEXT_INREG: {
+      assert(DstOps.size() == 1 && "Invalid dst ops");
+      assert(SrcOps.size() == 2 && "Invalid src ops");
+      const DstOp &Dst = DstOps[0];
+      const SrcOp &Src0 = SrcOps[0];
+      const SrcOp &Src1 = SrcOps[1];
+      if (auto MaybeCst =
+              ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
+        return buildConstant(Dst, MaybeCst->getSExtValue());
+      break;
+    }
     }
     return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps);
   }
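
For illustration (not part of this change), with the new case a G_SEXT_INREG whose source is a G_CONSTANT folds at build time. A minimal sketch, assuming CSEB is a ConstantFoldingMIRBuilder already positioned in a function:

    LLT S32 = LLT::scalar(32);
    auto Cst = CSEB.buildConstant(S32, 0x80); // G_CONSTANT i32 128
    // Folds directly to G_CONSTANT i32 -128 (the sign bit of the low 8 bits
    // is set), rather than emitting a G_SEXT_INREG instruction.
    auto Folded =
        CSEB.buildInstr(TargetOpcode::G_SEXT_INREG, {S32}, {Cst, int64_t(8)});
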
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
index e5691cb..dd7f04a 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -51,7 +51,7 @@
   /// For convenience, finishedChangingAllUsesOfReg() will report the completion
   /// of the changes. The use list may change between this call and
   /// finishedChangingAllUsesOfReg().
-  void changingAllUsesOfReg(const MachineRegisterInfo &MRI, unsigned Reg);
+  void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
   /// All instructions reported as changing by changingAllUsesOfReg() have
   /// finished being changed.
   void finishedChangingAllUsesOfReg();
@@ -101,7 +101,7 @@
   void MF_HandleRemoval(MachineInstr &MI) override { erasingInstr(MI); }
 };
 
-/// A simple RAII based CSEInfo installer.
+/// A simple RAII based Delegate installer.
 /// Use this in a scope to install a delegate to the MachineFunction and reset
 /// it at the end of the scope.
 class RAIIDelegateInstaller {
@@ -113,5 +113,27 @@
   ~RAIIDelegateInstaller();
 };
 
+/// A simple RAII based Observer installer.
+/// Use this in a scope to install the Observer to the MachineFunction and reset
+/// it at the end of the scope.
+class RAIIMFObserverInstaller {
+  MachineFunction &MF;
+
+public:
+  RAIIMFObserverInstaller(MachineFunction &MF, GISelChangeObserver &Observer);
+  ~RAIIMFObserverInstaller();
+};
+
+/// Class to install both of the above.
+class RAIIMFObsDelInstaller {
+  RAIIDelegateInstaller DelI;
+  RAIIMFObserverInstaller ObsI;
+
+public:
+  RAIIMFObsDelInstaller(MachineFunction &MF, GISelObserverWrapper &Wrapper)
+      : DelI(MF, &Wrapper), ObsI(MF, Wrapper) {}
+  ~RAIIMFObsDelInstaller() = default;
+};
+
 } // namespace llvm
 #endif
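
For illustration (not part of this change), a sketch of scoping an observer with the combined installer; CSEInfo here stands for any GISelChangeObserver implementation available in the pass:

    {
      GISelObserverWrapper WrapperObserver;
      WrapperObserver.addObserver(&CSEInfo);
      RAIIMFObsDelInstaller Installer(MF, WrapperObserver);
      // ... mutate MF; created/changed/erased instructions are reported ...
    } // both the MF delegate and the observer are removed here
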
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
new file mode 100644
index 0000000..eafed37
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
@@ -0,0 +1,131 @@
+//===- llvm/CodeGen/GlobalISel/GISelKnownBits.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Provides analysis for querying information about KnownBits during GISel
+/// passes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
+#define LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/KnownBits.h"
+
+namespace llvm {
+
+class TargetLowering;
+class DataLayout;
+
+class GISelKnownBits : public GISelChangeObserver {
+  MachineFunction &MF;
+  MachineRegisterInfo &MRI;
+  const TargetLowering &TL;
+  const DataLayout &DL;
+  unsigned MaxDepth;
+  /// Cache maintained during a computeKnownBits request.
+  SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;
+
+  void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
+                           const APInt &DemandedElts,
+                           unsigned Depth = 0);
+
+  unsigned computeNumSignBitsMin(Register Src0, Register Src1,
+                                 const APInt &DemandedElts, unsigned Depth = 0);
+
+public:
+  GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
+  virtual ~GISelKnownBits() = default;
+
+  const MachineFunction &getMachineFunction() const {
+    return MF;
+  }
+
+  const DataLayout &getDataLayout() const {
+    return DL;
+  }
+
+  virtual void computeKnownBitsImpl(Register R, KnownBits &Known,
+                                    const APInt &DemandedElts,
+                                    unsigned Depth = 0);
+
+  unsigned computeNumSignBits(Register R, const APInt &DemandedElts,
+                              unsigned Depth = 0);
+  unsigned computeNumSignBits(Register R, unsigned Depth = 0);
+
+  // KnownBitsAPI
+  KnownBits getKnownBits(Register R);
+  KnownBits getKnownBits(Register R, const APInt &DemandedElts,
+                         unsigned Depth = 0);
+
+  // Calls getKnownBits for first operand def of MI.
+  KnownBits getKnownBits(MachineInstr &MI);
+  APInt getKnownZeroes(Register R);
+  APInt getKnownOnes(Register R);
+
+  /// \return true if 'Val & Mask' is known to be zero, i.e. every bit set in
+  /// \p Mask is known to be zero in \p Val. We use this predicate to simplify
+  /// operations downstream.
+  bool maskedValueIsZero(Register Val, const APInt &Mask) {
+    return Mask.isSubsetOf(getKnownBits(Val).Zero);
+  }
+
+  /// \return true if the sign bit of Op is known to be zero.  We use this
+  /// predicate to simplify operations downstream.
+  bool signBitIsZero(Register Op);
+
+  static void computeKnownBitsForAlignment(KnownBits &Known,
+                                           Align Alignment) {
+    // The low bits are known zero if the pointer is aligned.
+    Known.Zero.setLowBits(Log2(Alignment));
+  }
+
+  /// \return The known alignment for the pointer-like value \p R.
+  Align computeKnownAlignment(Register R, unsigned Depth = 0);
+
+  // Observer API. No-op for non-caching implementation.
+  void erasingInstr(MachineInstr &MI) override {}
+  void createdInstr(MachineInstr &MI) override {}
+  void changingInstr(MachineInstr &MI) override {}
+  void changedInstr(MachineInstr &MI) override {}
+
+protected:
+  unsigned getMaxDepth() const { return MaxDepth; }
+};
+
+/// To use the GISelKnownBits analysis in a pass,
+/// GISelKnownBits &Info = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+/// Add to the observer list if the Info is caching:
+/// WrapperObserver.addObserver(&Info);
+
+/// Eventually add other features such as caching/ser/deserializing
+/// to MIR etc. Those implementations can derive from GISelKnownBits
+/// and override computeKnownBitsImpl.
+class GISelKnownBitsAnalysis : public MachineFunctionPass {
+  std::unique_ptr<GISelKnownBits> Info;
+
+public:
+  static char ID;
+  GISelKnownBitsAnalysis() : MachineFunctionPass(ID) {
+    initializeGISelKnownBitsAnalysisPass(*PassRegistry::getPassRegistry());
+  }
+  GISelKnownBits &get(MachineFunction &MF) {
+    if (!Info)
+      Info = std::make_unique<GISelKnownBits>(MF);
+    return *Info.get();
+  }
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  void releaseMemory() override { Info.reset(); }
+};
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
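
For illustration (not part of this change), a sketch of a MachineFunctionPass consuming the analysis, following the usage note in the header above; MyPass is hypothetical:

    void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<GISelKnownBitsAnalysis>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool MyPass::runOnMachineFunction(MachineFunction &MF) {
      GISelKnownBits &KB = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
      // Example queries: KB.getKnownBits(Reg), KB.signBitIsZero(Reg),
      // KB.maskedValueIsZero(Reg, Mask), KB.computeKnownAlignment(Reg).
      return false;
    }
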
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
index b0bb519..9e7ade3 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -11,9 +11,6 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/Support/Debug.h"
 
 namespace llvm {
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 8654ba8..8eab8a5 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -20,13 +20,14 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
-#include "llvm/CodeGen/GlobalISel/Types.h"
-#include "llvm/CodeGen/SwiftErrorValueTracking.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SwiftErrorValueTracking.h"
 #include "llvm/CodeGen/SwitchLoweringUtils.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/CodeGen.h"
 #include <memory>
 #include <utility>
 
@@ -37,8 +38,8 @@
 class CallInst;
 class CallLowering;
 class Constant;
+class ConstrainedFPIntrinsic;
 class DataLayout;
-class FunctionLoweringInfo;
 class Instruction;
 class MachineBasicBlock;
 class MachineFunction;
@@ -202,6 +203,10 @@
   /// \return true if the materialization succeeded.
   bool translate(const Constant &C, Register Reg);
 
+  // Translate U as a copy of V.
+  bool translateCopy(const User &U, const Value &V,
+                     MachineIRBuilder &MIRBuilder);
+
   /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
   /// emitted.
   bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
@@ -213,13 +218,15 @@
   bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
 
   /// Translate an LLVM string intrinsic (memcpy, memset, ...).
-  bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
-                        unsigned ID);
+  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
+                        unsigned Opcode);
 
   void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
 
   bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                   MachineIRBuilder &MIRBuilder);
+  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
+                                    MachineIRBuilder &MIRBuilder);
 
   /// Helper function for translateSimpleIntrinsic.
   /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
@@ -232,10 +239,13 @@
   bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                 MachineIRBuilder &MIRBuilder);
 
+  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
+                                       MachineIRBuilder &MIRBuilder);
+
   bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);
 
-  bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
+  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
 
   /// Returns true if the value should be split into multiple LLTs.
   /// If \p Offsets is given then the split type's offsets will be stored in it.
@@ -243,10 +253,26 @@
   bool valueIsSplit(const Value &V,
                     SmallVectorImpl<uint64_t> *Offsets = nullptr);
 
+  /// Common code for translating normal calls or invokes.
+  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
+
   /// Translate call instruction.
   /// \pre \p U is a call instruction.
   bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
 
+  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
+  /// many places it could ultimately go. In the IR, we have a single unwind
+  /// destination, but in the machine CFG, we enumerate all the possible blocks.
+  /// This function skips over imaginary basic blocks that hold catchswitch
+  /// instructions, and finds all the "real" machine
+  /// basic block destinations. As those destinations may not be successors of
+  /// EHPadBB, here we also calculate the edge probability to those
+  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
+  bool findUnwindDestinations(
+      const BasicBlock *EHPadBB, BranchProbability Prob,
+      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
+          &UnwindDests);
+
   bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
 
   bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -278,11 +304,37 @@
   /// MachineBasicBlocks for the function have been created.
   void finishPendingPhis();
 
+  /// Translate \p Inst into a unary operation \p Opcode.
+  /// \pre \p U is a unary operation.
+  bool translateUnaryOp(unsigned Opcode, const User &U,
+                        MachineIRBuilder &MIRBuilder);
+
   /// Translate \p Inst into a binary operation \p Opcode.
   /// \pre \p U is a binary operation.
   bool translateBinaryOp(unsigned Opcode, const User &U,
                          MachineIRBuilder &MIRBuilder);
 
+  /// If the set of cases should be emitted as a series of branches, return
+  /// true. If we should emit this as a bunch of and/or'd together conditions,
+  /// return false.
+  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
+  /// Helper method for findMergedConditions.
+  /// This function emits a branch and is used at the leaves of an OR or an
+  /// AND operator tree.
+  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
+                                    MachineBasicBlock *FBB,
+                                    MachineBasicBlock *CurBB,
+                                    MachineBasicBlock *SwitchBB,
+                                    BranchProbability TProb,
+                                    BranchProbability FProb, bool InvertCond);
+  /// Used during condbr translation to find trees of conditions that can be
+  /// optimized.
+  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
+                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
+                            MachineBasicBlock *SwitchBB,
+                            Instruction::BinaryOps Opc, BranchProbability TProb,
+                            BranchProbability FProb, bool InvertCond);
+
   /// Translate branch (br) instruction.
   /// \pre \p U is a branch instruction.
   bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -296,19 +348,23 @@
   void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                       MachineIRBuilder &MIB);
 
-  bool lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
-                              MachineBasicBlock *SwitchMBB,
-                              MachineBasicBlock *CurMBB,
-                              MachineBasicBlock *DefaultMBB,
-                              MachineIRBuilder &MIB,
-                              MachineFunction::iterator BBI,
-                              BranchProbability UnhandledProbs,
-                              SwitchCG::CaseClusterIt I,
-                              MachineBasicBlock *Fallthrough,
-                              bool FallthroughUnreachable);
+  /// Generate code for the BitTest header block, which precedes each sequence
+  /// of BitTestCases.
+  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
+                         MachineBasicBlock *SwitchMBB);
+  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
+  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
+                       BranchProbability BranchProbToNext, Register Reg,
+                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
 
-  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
-                                Value *Cond,
+  bool lowerJumpTableWorkItem(
+      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
+      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
+
+  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                 MachineBasicBlock *Fallthrough,
                                 bool FallthroughUnreachable,
                                 BranchProbability UnhandledProbs,
@@ -316,6 +372,14 @@
                                 MachineIRBuilder &MIB,
                                 MachineBasicBlock *SwitchMBB);
 
+  bool lowerBitTestWorkItem(
+      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
+      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
+      bool FallthroughUnreachable);
+
   bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                            MachineBasicBlock *SwitchMBB,
                            MachineBasicBlock *DefaultMBB,
@@ -342,8 +406,6 @@
   /// \pre \p U is a return instruction.
   bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
 
-  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
-
   bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
 
   bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
@@ -428,6 +490,9 @@
   bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
     return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
   }
+  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
+    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
+  }
   bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
     return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
   }
@@ -449,6 +514,7 @@
   bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
   bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
   bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
+  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
 
   // Stubs to keep the compiler happy while we implement the rest of the
   // translation.
@@ -505,6 +571,8 @@
   /// Current target configuration. Controls how the pass handles errors.
   const TargetPassConfig *TPC;
 
+  CodeGenOpt::Level OptLevel;
+
   /// Current optimization remark emitter. Used to report failures.
   std::unique_ptr<OptimizationRemarkEmitter> ORE;
 
@@ -514,6 +582,10 @@
   // function has the optnone attribute.
   bool EnableOpts = false;
 
+  /// True when the block contains a tail call. This allows the IRTranslator to
+  /// stop translating such blocks early.
+  bool HasTailCall = false;
+
   /// Switch analysis and optimization.
   class GISelSwitchLowering : public SwitchCG::SwitchLowering {
   public:
@@ -571,7 +643,7 @@
   /// Get the alignment of the given memory operation instruction. This will
   /// either be the explicitly specified value or the ABI-required alignment for
   /// the type being accessed (according to the Module's DataLayout).
-  unsigned getMemOpAlignment(const Instruction &I);
+  Align getMemOpAlign(const Instruction &I);
 
   /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
   /// returned will be the head of the translated block (suitable for branch
@@ -600,12 +672,12 @@
   BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                        const MachineBasicBlock *Dst) const;
 
-  void addSuccessorWithProb(MachineBasicBlock *Src, MachineBasicBlock *Dst,
-                            BranchProbability Prob);
+  void addSuccessorWithProb(
+      MachineBasicBlock *Src, MachineBasicBlock *Dst,
+      BranchProbability Prob = BranchProbability::getUnknown());
 
 public:
-  // Ctor, nothing fancy.
-  IRTranslator();
+  IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
 
   StringRef getPassName() const override { return "IRTranslator"; }
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h
new file mode 100644
index 0000000..ac61848
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InlineAsmLowering.h
@@ -0,0 +1,67 @@
+//===- llvm/CodeGen/GlobalISel/InlineAsmLowering.h --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM inline asm to machine code INLINEASM.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
+#define LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <functional>
+
+namespace llvm {
+class CallBase;
+class MachineIRBuilder;
+class MachineOperand;
+class Register;
+class TargetLowering;
+class Value;
+
+class InlineAsmLowering {
+  const TargetLowering *TLI;
+
+  virtual void anchor();
+
+public:
+  /// Lower the given inline asm call instruction.
+  /// \p GetOrCreateVRegs is a callback to materialize a register for the
+  /// input and output operands of the inline asm.
+  /// \return True if the lowering succeeds, false otherwise.
+  bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB,
+                      std::function<ArrayRef<Register>(const Value &Val)>
+                          GetOrCreateVRegs) const;
+
+  /// Lower the specified operand into the Ops vector.
+  /// \p Val is the IR input value to be lowered.
+  /// \p Constraint is the user-supplied constraint string.
+  /// \p Ops is the vector to be filled with the lowered operands.
+  /// \return True if the lowering succeeds, false otherwise.
+  virtual bool lowerAsmOperandForConstraint(Value *Val, StringRef Constraint,
+                                            std::vector<MachineOperand> &Ops,
+                                            MachineIRBuilder &MIRBuilder) const;
+
+protected:
+  /// Getter for generic TargetLowering class.
+  const TargetLowering *getTLI() const { return TLI; }
+
+  /// Getter for target specific TargetLowering class.
+  template <class XXXTargetLowering> const XXXTargetLowering *getTLI() const {
+    return static_cast<const XXXTargetLowering *>(TLI);
+  }
+
+public:
+  InlineAsmLowering(const TargetLowering *TLI) : TLI(TLI) {}
+  virtual ~InlineAsmLowering() = default;
+};
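+
+// A minimal usage sketch (illustrative only; MIRBuilder, CB and the
+// getOrCreateVRegs helper are assumed to exist at the call site, e.g. inside
+// the IRTranslator):
+//   InlineAsmLowering ALI(TLI);
+//   bool Lowered = ALI.lowerInlineAsm(
+//       MIRBuilder, CB,
+//       [&](const Value &V) { return getOrCreateVRegs(V); });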
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index e4d05a5..bf9991e 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -31,6 +31,7 @@
 
 class APInt;
 class APFloat;
+class GISelKnownBits;
 class MachineInstr;
 class MachineInstrBuilder;
 class MachineFunction;
@@ -111,6 +112,14 @@
   /// - InsnID - Instruction ID
   /// - Expected opcode
   GIM_CheckOpcode,
+
+  /// Check the opcode on the specified instruction, checking 2 acceptable
+  /// alternatives.
+  /// - InsnID - Instruction ID
+  /// - Expected opcode
+  /// - Alternative expected opcode
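+  ///
+  /// Illustrative encoding (operand values are hypothetical, not taken from
+  /// any target's generated table): accepting either G_ADD or G_SUB on
+  /// instruction 0 could be emitted as:
+  ///   GIM_CheckOpcodeIsEither, /*MI*/0,
+  ///   TargetOpcode::G_ADD, TargetOpcode::G_SUB,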
+  GIM_CheckOpcodeIsEither,
+
   /// Check the instruction has the right number of operands
   /// - InsnID - Instruction ID
   /// - Expected number of operands
@@ -138,6 +147,23 @@
   /// - MMOIdx - MMO index
   /// - Size - The size in bytes of the memory access
   GIM_CheckMemorySizeEqualTo,
+
+  /// Check the address space of the memory access for the given machine memory
+  /// operand.
+  /// - InsnID - Instruction ID
+  /// - MMOIdx - MMO index
+  /// - NumAddrSpace - Number of valid address spaces
+  /// - AddrSpaceN - An allowed space of the memory access
+  /// - AddrSpaceN+1 ...
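+  ///
+  /// A sketch with assumed values: accepting address spaces 1 or 3 on the
+  /// first memory operand of instruction 0 could be encoded as:
+  ///   GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0,
+  ///   /*NumAddrSpace*/2, /*AddrSpace*/1, /*AddrSpace*/3,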
+  GIM_CheckMemoryAddressSpace,
+
+  /// Check the minimum alignment of the memory access for the given machine
+  /// memory operand.
+  /// - InsnID - Instruction ID
+  /// - MMOIdx - MMO index
+  /// - MinAlign - Minimum acceptable alignment
+  GIM_CheckMemoryAlignment,
+
   /// Check the size of the memory access for the given machine memory operand
   /// against the size of an operand.
   /// - InsnID - Instruction ID
@@ -146,6 +172,15 @@
   GIM_CheckMemorySizeEqualToLLT,
   GIM_CheckMemorySizeLessThanLLT,
   GIM_CheckMemorySizeGreaterThanLLT,
+
+  /// Check if this is a vector that can be treated as a vector splat
+  /// constant. This is valid for both G_BUILD_VECTOR as well as
+  /// G_BUILD_VECTOR_TRUNC. AllOnes refers to the individual bits, so each
+  /// matching element must be -1.
+  /// - InsnID - Instruction ID
+  GIM_CheckIsBuildVectorAllOnes,
+  GIM_CheckIsBuildVectorAllZeros,
+
   /// Check a generic C++ instruction predicate
   /// - InsnID - Instruction ID
   /// - PredicateID - The ID of the predicate function to call
@@ -191,11 +226,22 @@
   /// - Expected Intrinsic ID
   GIM_CheckIntrinsicID,
 
+  /// Check the operand is a specific predicate
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - Expected predicate
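+  ///
+  /// For example (values assumed for illustration), requiring an integer
+  /// equality predicate in operand 1 of instruction 0 could be encoded as:
+  ///   GIM_CheckCmpPredicate, /*MI*/0, /*Op*/1, CmpInst::ICMP_EQ,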
+  GIM_CheckCmpPredicate,
+
   /// Check the specified operand is an MBB
   /// - InsnID - Instruction ID
   /// - OpIdx - Operand index
   GIM_CheckIsMBB,
 
+  /// Check the specified operand is an Imm
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  GIM_CheckIsImm,
+
   /// Check if the specified operand is safe to fold into the current
   /// instruction.
   /// - InsnID - Instruction ID
@@ -208,6 +254,15 @@
   /// - OtherOpIdx - Other operand index
   GIM_CheckIsSameOperand,
 
+  /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine the
+  /// named operands that will be recorded in RecordedOperands. The names of
+  /// these operands are referenced in the predicate's argument list, and the
+  /// emitter assigns each StoreIdx according to the order in which the names
+  /// appear in that list.
+  /// - InsnID - Instruction ID
+  /// - OpIdx - Operand index
+  /// - StoreIdx - Store location in RecordedOperands.
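+  ///
+  /// Illustrative encoding (indices are hypothetical): recording operand 2
+  /// of instruction 0 into slot 1 of RecordedOperands could look like:
+  ///   GIM_RecordNamedOperand, /*MI*/0, /*Op*/2, /*StoreIdx*/1,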
+  GIM_RecordNamedOperand,
+
   /// Fail the current try-block, or completely fail to match if there is no
   /// current try-block.
   GIM_Reject,
@@ -264,6 +319,13 @@
   /// - TempRegFlags - The register flags to set
   GIR_AddTempRegister,
 
+  /// Add a temporary register to the specified instruction
+  /// - InsnID - Instruction ID to modify
+  /// - TempRegID - The temporary register ID to add
+  /// - TempRegFlags - The register flags to set
+  /// - SubRegIndex - The subregister index to set
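+  ///
+  /// A sketch with assumed values, where Tgt::sub0 stands in for some
+  /// target-defined subregister index:
+  ///   GIR_AddTempSubRegister, /*InsnID*/0, /*TempRegID*/0,
+  ///   /*TempRegFlags*/RegState::Define, /*SubRegIndex*/Tgt::sub0,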
+  GIR_AddTempSubRegister,
+
   /// Add an immediate to the specified instruction
   /// - InsnID - Instruction ID to modify
   /// - Imm - The immediate to add
@@ -284,6 +346,14 @@
   /// - RendererFnID - Custom renderer function to call
   GIR_CustomRenderer,
 
+  /// Render operands to the specified instruction using a custom function,
+  /// reading from a specific operand.
+  /// - InsnID - Instruction ID to modify
+  /// - OldInsnID - Instruction ID to get the matched operand from
+  /// - OpIdx - Operand index in OldInsnID the render function should read from.
+  /// - RendererFnID - Custom renderer function to call
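+  ///
+  /// Illustrative encoding (the renderer ID is a hypothetical
+  /// tablegen-assigned name): rendering from operand 1 of matched
+  /// instruction 0 could be emitted as:
+  ///   GIR_CustomOperandRenderer, /*InsnID*/0, /*OldInsnID*/0, /*OpIdx*/1,
+  ///   /*RendererFnID*/GICR_renderFoo,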
+  GIR_CustomOperandRenderer,
+
   /// Render a G_CONSTANT operator as a sign-extended immediate.
   /// - NewInsnID - Instruction ID to modify
   /// - OldInsnID - Instruction ID to copy from
@@ -355,7 +425,25 @@
   ///   if returns true:
   ///     for I in all mutated/inserted instructions:
   ///       !isPreISelGenericOpcode(I.getOpcode())
-  virtual bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const = 0;
+  virtual bool select(MachineInstr &I) = 0;
+
+  CodeGenCoverage *CoverageInfo = nullptr;
+  GISelKnownBits *KnownBits = nullptr;
+  MachineFunction *MF = nullptr;
+
+  virtual void setupGeneratedPerFunctionState(MachineFunction &MF) {
+    llvm_unreachable("TableGen should have emitted implementation");
+  }
+
+  /// Setup per-MF selector state.
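+  /// A typical call site, sketched under the assumption that the selection
+  /// pass owns MF, KB and CoverageInfo (illustrative, not normative):
+  ///   ISel.setupMF(MF, KB, CoverageInfo);
+  ///   for (MachineInstr &MI : ...)
+  ///     ISel.select(MI);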
+  virtual void setupMF(MachineFunction &mf,
+                       GISelKnownBits &KB,
+                       CodeGenCoverage &covinfo) {
+    CoverageInfo = &covinfo;
+    KnownBits = &KB;
+    MF = &mf;
+    setupGeneratedPerFunctionState(mf);
+  }
 
 protected:
   using ComplexRendererFns =
@@ -367,6 +455,11 @@
     std::vector<ComplexRendererFns::value_type> Renderers;
     RecordedMIVector MIs;
     DenseMap<unsigned, unsigned> TempRegisters;
+    /// Named operands that a predicate with 'let PredicateCodeUsesOperands = 1'
+    /// references in its argument list. Operands are inserted at the index set
+    /// by the emitter, which corresponds to the order in which the names appear
+    /// in the argument list. Currently such predicates have at most 3 arguments.
+    std::array<const MachineOperand *, 3> RecordedOperands;
 
     MatcherState(unsigned MaxRenderers);
   };
@@ -427,7 +520,9 @@
     llvm_unreachable(
         "Subclasses must override this with a tablegen-erated function");
   }
-  virtual bool testMIPredicate_MI(unsigned, const MachineInstr &) const {
+  virtual bool testMIPredicate_MI(
+      unsigned, const MachineInstr &,
+      const std::array<const MachineOperand *, 3> &Operands) const {
     llvm_unreachable(
         "Subclasses must override this with a tablegen-erated function");
   }
@@ -445,7 +540,7 @@
   bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
                          const MachineRegisterInfo &MRI) const;
 
-  /// Return true if the specified operand is a G_GEP with a G_CONSTANT on the
+  /// Return true if the specified operand is a G_PTR_ADD with a G_CONSTANT on the
   /// right-hand side. GlobalISel's separation of pointer and integer types
   /// means that we don't need to worry about G_OR with equivalent semantics.
   bool isBaseWithConstantOffset(const MachineOperand &Root,
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index e010180..bcb84c3 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -26,6 +26,7 @@
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
@@ -57,6 +58,11 @@
   uint64_t CurrentIdx = 0;
   SmallVector<uint64_t, 4> OnFailResumeAt;
 
+  // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
+  bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();
+
+  const uint16_t Flags = State.MIs[0]->getFlags();
+
   enum RejectAction { RejectAndGiveUp, RejectAndResume };
   auto handleReject = [&]() -> RejectAction {
     DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
@@ -70,6 +76,19 @@
     return RejectAndResume;
   };
 
+  auto propagateFlags = [=](NewMIVector &OutMIs) {
+    for (auto MIB : OutMIs) {
+      // Set the NoFPExcept flag when no original matched instruction could
+      // raise an FP exception, but the new instruction potentially might.
+      uint16_t MIBFlags = Flags;
+      if (NoFPException && MIB->mayRaiseFPException())
+        MIBFlags |= MachineInstr::NoFPExcept;
+      MIB.setMIFlags(MIBFlags);
+    }
+
+    return true;
+  };
+
   while (true) {
     assert(CurrentIdx != ~0u && "Invalid MatchTable index");
     int64_t MatcherOpcode = MatchTable[CurrentIdx++];
@@ -98,7 +117,7 @@
           return false;
         break;
       }
-      if (TRI.isPhysicalRegister(MO.getReg())) {
+      if (Register::isPhysicalRegister(MO.getReg())) {
         DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                         dbgs() << CurrentIdx << ": Is a physical register\n");
         if (handleReject() == RejectAndGiveUp)
@@ -135,24 +154,31 @@
       break;
     }
 
-    case GIM_CheckOpcode: {
+    case GIM_CheckOpcode:
+    case GIM_CheckOpcodeIsEither: {
       int64_t InsnID = MatchTable[CurrentIdx++];
-      int64_t Expected = MatchTable[CurrentIdx++];
+      int64_t Expected0 = MatchTable[CurrentIdx++];
+      int64_t Expected1 = -1;
+      if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+        Expected1 = MatchTable[CurrentIdx++];
 
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       unsigned Opcode = State.MIs[InsnID]->getOpcode();
 
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
-                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
-                             << "], ExpectedOpcode=" << Expected
-                             << ") // Got=" << Opcode << "\n");
-      if (Opcode != Expected) {
+        dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+        << "], ExpectedOpcode=" << Expected0;
+        if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+          dbgs() << " || " << Expected1;
+        dbgs() << ") // Got=" << Opcode << "\n";
+      );
+
+      if (Opcode != Expected0 && Opcode != Expected1) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       }
       break;
     }
-
     case GIM_SwitchOpcode: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t LowerBound = MatchTable[CurrentIdx++];
@@ -174,7 +200,7 @@
       CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
       if (!CurrentIdx) {
         CurrentIdx = Default;
-	break;
+        break;
       }
       OnFailResumeAt.push_back(Default);
       break;
@@ -302,6 +328,35 @@
           return false;
       break;
     }
+    case GIM_CheckIsBuildVectorAllOnes:
+    case GIM_CheckIsBuildVectorAllZeros: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx
+                             << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
+                             << InsnID << "])\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      const MachineInstr *MI = State.MIs[InsnID];
+      assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
+              MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
+             "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
+
+      if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
+        if (!isBuildVectorAllOnes(*MI, MRI)) {
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+        }
+      } else {
+        if (!isBuildVectorAllZeros(*MI, MRI)) {
+          if (handleReject() == RejectAndGiveUp)
+            return false;
+        }
+      }
+
+      break;
+    }
     case GIM_CheckCxxInsnPredicate: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t Predicate = MatchTable[CurrentIdx++];
@@ -312,7 +367,8 @@
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
 
-      if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID]))
+      if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
+                              State.RecordedOperands))
         if (handleReject() == RejectAndGiveUp)
           return false;
       break;
@@ -370,6 +426,69 @@
             return false;
       break;
     }
+    case GIM_CheckMemoryAddressSpace: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      // This accepts a list of possible address spaces.
+      const int NumAddrSpace = MatchTable[CurrentIdx++];
+
+      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      // Need to still jump to the end of the list of address spaces if we find
+      // a match earlier.
+      const uint64_t LastIdx = CurrentIdx + NumAddrSpace;
+
+      const MachineMemOperand *MMO
+        = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+      const unsigned MMOAddrSpace = MMO->getAddrSpace();
+
+      bool Success = false;
+      for (int I = 0; I != NumAddrSpace; ++I) {
+        unsigned AddrSpace = MatchTable[CurrentIdx++];
+        DEBUG_WITH_TYPE(
+          TgtInstructionSelector::getName(),
+          dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
+                 << AddrSpace << '\n');
+
+        if (AddrSpace == MMOAddrSpace) {
+          Success = true;
+          break;
+        }
+      }
+
+      CurrentIdx = LastIdx;
+      if (!Success && handleReject() == RejectAndGiveUp)
+        return false;
+      break;
+    }
+    case GIM_CheckMemoryAlignment: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t MMOIdx = MatchTable[CurrentIdx++];
+      unsigned MinAlign = MatchTable[CurrentIdx++];
+
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+        break;
+      }
+
+      MachineMemOperand *MMO
+        = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
+                      << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
+                      << ")->getAlignment() >= " << MinAlign << ")\n");
+      if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
+        return false;
+
+      break;
+    }
     case GIM_CheckMemorySizeEqualTo: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t MMOIdx = MatchTable[CurrentIdx++];
@@ -499,6 +618,20 @@
 
       break;
     }
+    case GIM_RecordNamedOperand: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      uint64_t StoreIdx = MatchTable[CurrentIdx++];
+
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), StoreIdx=" << StoreIdx << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
+      State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
+      break;
+    }
     case GIM_CheckRegBankForClass: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -510,7 +643,8 @@
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
       if (!MO.isReg() ||
-          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
+                                      MRI.getType(MO.getReg())) !=
               RBI.getRegBank(MO.getReg(), MRI, TRI)) {
         if (handleReject() == RejectAndGiveUp)
           return false;
@@ -577,10 +711,15 @@
                              << "), Value=" << Value << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
       MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
-      if (!MO.isCImm() || !MO.getCImm()->equalsInt(Value)) {
-        if (handleReject() == RejectAndGiveUp)
-          return false;
-      }
+      if (MO.isImm() && MO.getImm() == Value)
+        break;
+
+      if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
+        break;
+
+      if (handleReject() == RejectAndGiveUp)
+        return false;
+
       break;
     }
 
@@ -599,7 +738,21 @@
           return false;
       break;
     }
-
+    case GIM_CheckCmpPredicate: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t Value = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
+                             << InsnID << "]->getOperand(" << OpIdx
+                             << "), Value=" << Value << ")\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      if (!MO.isPredicate() || MO.getPredicate() != Value)
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      break;
+    }
     case GIM_CheckIsMBB: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -613,7 +766,19 @@
       }
       break;
     }
-
+    case GIM_CheckIsImm: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+                      dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
+                             << "]->getOperand(" << OpIdx << "))\n");
+      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
+        if (handleReject() == RejectAndGiveUp)
+          return false;
+      }
+      break;
+    }
     case GIM_CheckIsSafeToFold: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
@@ -753,24 +918,35 @@
     case GIR_AddRegister: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t RegNum = MatchTable[CurrentIdx++];
+      uint64_t RegFlags = MatchTable[CurrentIdx++];
       assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-      OutMIs[InsnID].addReg(RegNum);
-      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
-                      dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
-                             << InsnID << "], " << RegNum << ")\n");
+      OutMIs[InsnID].addReg(RegNum, RegFlags);
+      DEBUG_WITH_TYPE(
+        TgtInstructionSelector::getName(),
+        dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs["
+        << InsnID << "], " << RegNum << ", " << RegFlags << ")\n");
       break;
     }
 
-    case GIR_AddTempRegister: {
+    case GIR_AddTempRegister:
+    case GIR_AddTempSubRegister: {
       int64_t InsnID = MatchTable[CurrentIdx++];
       int64_t TempRegID = MatchTable[CurrentIdx++];
       uint64_t TempRegFlags = MatchTable[CurrentIdx++];
+      unsigned SubReg = 0;
+      if (MatcherOpcode == GIR_AddTempSubRegister)
+        SubReg = MatchTable[CurrentIdx++];
+
       assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
-      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags);
+
+      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags, SubReg);
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
                              << InsnID << "], TempRegisters[" << TempRegID
-                             << "], " << TempRegFlags << ")\n");
+                             << "]";
+                      if (SubReg)
+                        dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
+                      dbgs() << ", " << TempRegFlags << ")\n");
       break;
     }
 
@@ -854,8 +1030,27 @@
                       dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
                              << InsnID << "], MIs[" << OldInsnID << "], "
                              << RendererFnID << ")\n");
+      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(
+        OutMIs[InsnID], *State.MIs[OldInsnID],
+        -1); // Not a source operand of the old instruction.
+      break;
+    }
+    case GIR_CustomOperandRenderer: {
+      int64_t InsnID = MatchTable[CurrentIdx++];
+      int64_t OldInsnID = MatchTable[CurrentIdx++];
+      int64_t OpIdx = MatchTable[CurrentIdx++];
+      int64_t RendererFnID = MatchTable[CurrentIdx++];
+      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+
+      DEBUG_WITH_TYPE(
+        TgtInstructionSelector::getName(),
+        dbgs() << CurrentIdx << ": GIR_CustomOperandRenderer(OutMIs["
+               << InsnID << "], MIs[" << OldInsnID << "]->getOperand("
+               << OpIdx << "), "
+        << RendererFnID << ")\n");
       (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
-                                                     *State.MIs[OldInsnID]);
+                                                     *State.MIs[OldInsnID],
+                                                     OpIdx);
       break;
     }
     case GIR_ConstrainOperandRC: {
@@ -939,6 +1134,7 @@
     case GIR_Done:
       DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                       dbgs() << CurrentIdx << ": GIR_Done\n");
+      propagateFlags(OutMIs);
       return true;
 
     default:
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index a22778b..e7bda3b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -46,11 +46,11 @@
       : Builder(B), MRI(MRI), LI(LI) {}
 
   bool tryCombineAnyExt(MachineInstr &MI,
-                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
-    if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
-      return false;
+                        SmallVectorImpl<MachineInstr *> &DeadInsts,
+                        SmallVectorImpl<Register> &UpdatedDefs) {
+    assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);
 
-    Builder.setInstr(MI);
+    Builder.setInstrAndDebugLoc(MI);
     Register DstReg = MI.getOperand(0).getReg();
     Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
@@ -59,6 +59,7 @@
     if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
       LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
+      UpdatedDefs.push_back(DstReg);
       markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
       return true;
     }
@@ -71,6 +72,7 @@
                                                     m_GSExt(m_Reg(ExtSrc)),
                                                     m_GZExt(m_Reg(ExtSrc)))))) {
       Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
+      UpdatedDefs.push_back(DstReg);
       markInstAndDefDead(MI, *ExtMI, DeadInsts);
       return true;
     }
@@ -79,102 +81,231 @@
     // Can't use MIPattern because we don't have a specific constant in mind.
     auto *SrcMI = MRI.getVRegDef(SrcReg);
     if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
-      const LLT &DstTy = MRI.getType(DstReg);
+      const LLT DstTy = MRI.getType(DstReg);
       if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
         auto &CstVal = SrcMI->getOperand(1);
         Builder.buildConstant(
             DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
+        UpdatedDefs.push_back(DstReg);
         markInstAndDefDead(MI, *SrcMI, DeadInsts);
         return true;
       }
     }
-    return tryFoldImplicitDef(MI, DeadInsts);
+    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
   }
 
   bool tryCombineZExt(MachineInstr &MI,
-                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+                      SmallVectorImpl<MachineInstr *> &DeadInsts,
+                      SmallVectorImpl<Register> &UpdatedDefs,
+                      GISelObserverWrapper &Observer) {
+    assert(MI.getOpcode() == TargetOpcode::G_ZEXT);
 
-    if (MI.getOpcode() != TargetOpcode::G_ZEXT)
-      return false;
-
-    Builder.setInstr(MI);
+    Builder.setInstrAndDebugLoc(MI);
     Register DstReg = MI.getOperand(0).getReg();
     Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
+    // zext(trunc x) -> and (aext/copy/trunc x), mask
+    // zext(sext x) -> and (sext x), mask
     Register TruncSrc;
-    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+    Register SextSrc;
+    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
+        mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
       LLT DstTy = MRI.getType(DstReg);
       if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
           isConstantUnsupported(DstTy))
         return false;
       LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       LLT SrcTy = MRI.getType(SrcReg);
-      APInt Mask = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
-      auto MIBMask = Builder.buildConstant(DstTy, Mask.getZExtValue());
-      Builder.buildAnd(DstReg, Builder.buildAnyExtOrTrunc(DstTy, TruncSrc),
-                       MIBMask);
+      APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
+      auto Mask = Builder.buildConstant(
+        DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
+      auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
+                                Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+      Builder.buildAnd(DstReg, Extended, Mask);
       markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
       return true;
     }
 
+    // zext(zext x) -> (zext x)
+    Register ZextSrc;
+    if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
+      Observer.changingInstr(MI);
+      MI.getOperand(1).setReg(ZextSrc);
+      Observer.changedInstr(MI);
+      UpdatedDefs.push_back(DstReg);
+      markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+      return true;
+    }
+
     // Try to fold zext(g_constant) when the larger constant type is legal.
     // Can't use MIPattern because we don't have a specific constant in mind.
     auto *SrcMI = MRI.getVRegDef(SrcReg);
     if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
-      const LLT &DstTy = MRI.getType(DstReg);
+      const LLT DstTy = MRI.getType(DstReg);
       if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
         auto &CstVal = SrcMI->getOperand(1);
         Builder.buildConstant(
             DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
+        UpdatedDefs.push_back(DstReg);
         markInstAndDefDead(MI, *SrcMI, DeadInsts);
         return true;
       }
     }
-    return tryFoldImplicitDef(MI, DeadInsts);
+    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
   }
 
   bool tryCombineSExt(MachineInstr &MI,
-                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+                      SmallVectorImpl<MachineInstr *> &DeadInsts,
+                      SmallVectorImpl<Register> &UpdatedDefs) {
+    assert(MI.getOpcode() == TargetOpcode::G_SEXT);
 
-    if (MI.getOpcode() != TargetOpcode::G_SEXT)
-      return false;
+    Builder.setInstrAndDebugLoc(MI);
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+
+    // sext(trunc x) -> (sext_inreg (aext/copy/trunc x), c)
+    Register TruncSrc;
+    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+      LLT DstTy = MRI.getType(DstReg);
+      if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
+        return false;
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      LLT SrcTy = MRI.getType(SrcReg);
+      uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
+      Builder.buildInstr(
+          TargetOpcode::G_SEXT_INREG, {DstReg},
+          {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), SizeInBits});
+      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+      return true;
+    }
+
+    // sext(zext x) -> (zext x)
+    // sext(sext x) -> (sext x)
+    Register ExtSrc;
+    MachineInstr *ExtMI;
+    if (mi_match(SrcReg, MRI,
+                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
+                                                    m_GSExt(m_Reg(ExtSrc)))))) {
+      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
+      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
+      UpdatedDefs.push_back(DstReg);
+      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+      return true;
+    }
+
+    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
+  }
+
+  bool tryCombineTrunc(MachineInstr &MI,
+                       SmallVectorImpl<MachineInstr *> &DeadInsts,
+                       SmallVectorImpl<Register> &UpdatedDefs,
+                       GISelObserverWrapper &Observer) {
+    assert(MI.getOpcode() == TargetOpcode::G_TRUNC);
 
     Builder.setInstr(MI);
     Register DstReg = MI.getOperand(0).getReg();
     Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
-    // sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c
-    Register TruncSrc;
-    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
-      LLT DstTy = MRI.getType(DstReg);
-      // Guess on the RHS shift amount type, which should be re-legalized if
-      // applicable.
-      if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy, DstTy}}) ||
-          isInstUnsupported({TargetOpcode::G_ASHR, {DstTy, DstTy}}) ||
-          isConstantUnsupported(DstTy))
+    // Try to fold trunc(g_constant) when the smaller constant type is legal.
+    // Can't use MIPattern because we don't have a specific constant in mind.
+    auto *SrcMI = MRI.getVRegDef(SrcReg);
+    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+      const LLT DstTy = MRI.getType(DstReg);
+      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+        auto &CstVal = SrcMI->getOperand(1);
+        Builder.buildConstant(
+            DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
+        UpdatedDefs.push_back(DstReg);
+        markInstAndDefDead(MI, *SrcMI, DeadInsts);
+        return true;
+      }
+    }
+
+    // Try to fold trunc(merge) to directly use the source of the merge.
+    // This gets rid of large, difficult-to-legalize merges.
+    if (SrcMI->getOpcode() == TargetOpcode::G_MERGE_VALUES) {
+      const Register MergeSrcReg = SrcMI->getOperand(1).getReg();
+      const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
+      const LLT DstTy = MRI.getType(DstReg);
+
+      // We can only fold if the types are scalar
+      const unsigned DstSize = DstTy.getSizeInBits();
+      const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
+      if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
         return false;
-      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
-      LLT SrcTy = MRI.getType(SrcReg);
-      unsigned ShAmt = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
-      auto MIBShAmt = Builder.buildConstant(DstTy, ShAmt);
-      auto MIBShl = Builder.buildInstr(
-          TargetOpcode::G_SHL, {DstTy},
-          {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), MIBShAmt});
-      Builder.buildInstr(TargetOpcode::G_ASHR, {DstReg}, {MIBShl, MIBShAmt});
-      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
+
+      if (DstSize < MergeSrcSize) {
+        // When the merge source is larger than the destination, we can just
+        // truncate the merge source directly
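+        //
+        // Sketch (illustrative types):
+        //   %1:_(s128) = G_MERGE_VALUES %a(s64), %b(s64)
+        //   %2:_(s16) = G_TRUNC %1
+        // becomes
+        //   %2:_(s16) = G_TRUNC %a(s64)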
+        if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
+          return false;
+
+        LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
+                          << MI);
+
+        Builder.buildTrunc(DstReg, MergeSrcReg);
+        UpdatedDefs.push_back(DstReg);
+      } else if (DstSize == MergeSrcSize) {
+        // If the sizes match we can simply try to replace the register
+        LLVM_DEBUG(
+            dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
+                   << MI);
+        replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
+                              Observer);
+      } else if (DstSize % MergeSrcSize == 0) {
+        // If the trunc size is a multiple of the merge source size we can use
+        // a smaller merge instead
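+        //
+        // Sketch (illustrative types):
+        //   %1:_(s128) = G_MERGE_VALUES %a(s32), %b(s32), %c(s32), %d(s32)
+        //   %2:_(s64) = G_TRUNC %1
+        // becomes
+        //   %2:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)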
+        if (isInstUnsupported(
+                {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
+          return false;
+
+        LLVM_DEBUG(
+            dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
+                   << MI);
+
+        const unsigned NumSrcs = DstSize / MergeSrcSize;
+        assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
+               "trunc(merge) should require less inputs than merge");
+        SmallVector<Register, 8> SrcRegs(NumSrcs);
+        for (unsigned i = 0; i < NumSrcs; ++i)
+          SrcRegs[i] = SrcMI->getOperand(i + 1).getReg();
+
+        Builder.buildMerge(DstReg, SrcRegs);
+        UpdatedDefs.push_back(DstReg);
+      } else {
+        // Unable to combine
+        return false;
+      }
+
+      markInstAndDefDead(MI, *SrcMI, DeadInsts);
       return true;
     }
-    return tryFoldImplicitDef(MI, DeadInsts);
+
+    // trunc(trunc) -> trunc
+    Register TruncSrc;
+    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
+      // Always combine trunc(trunc) since the eventual resulting trunc must be
+      // legal anyway as it must be legal for all outputs of the consumer type
+      // set.
+      LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_TRUNC): " << MI);
+
+      Builder.buildTrunc(DstReg, TruncSrc);
+      UpdatedDefs.push_back(DstReg);
+      markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
+      return true;
+    }
+
+    return false;
   }
 
   /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
   bool tryFoldImplicitDef(MachineInstr &MI,
-                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
+                          SmallVectorImpl<MachineInstr *> &DeadInsts,
+                          SmallVectorImpl<Register> &UpdatedDefs) {
     unsigned Opcode = MI.getOpcode();
-    if (Opcode != TargetOpcode::G_ANYEXT && Opcode != TargetOpcode::G_ZEXT &&
-        Opcode != TargetOpcode::G_SEXT)
-      return false;
+    assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
+           Opcode == TargetOpcode::G_SEXT);
 
     if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                            MI.getOperand(1).getReg(), MRI)) {
@@ -184,10 +315,11 @@
 
       if (Opcode == TargetOpcode::G_ANYEXT) {
         // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
-        if (isInstUnsupported({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
+        if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
           return false;
         LLVM_DEBUG(dbgs() << ".. Combine G_ANYEXT(G_IMPLICIT_DEF): " << MI;);
         Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
+        UpdatedDefs.push_back(DstReg);
       } else {
         // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0 because the top
         // bits will be 0 for G_ZEXT and 0/1 for the G_SEXT.
@@ -195,6 +327,7 @@
           return false;
         LLVM_DEBUG(dbgs() << ".. Combine G_[SZ]EXT(G_IMPLICIT_DEF): " << MI;);
         Builder.buildConstant(DstReg, 0);
+        UpdatedDefs.push_back(DstReg);
       }
 
       markInstAndDefDead(MI, *DefMI, DeadInsts);
@@ -203,30 +336,248 @@
     return false;
   }
 
-  static unsigned getMergeOpcode(LLT OpTy, LLT DestTy) {
-    if (OpTy.isVector() && DestTy.isVector())
-      return TargetOpcode::G_CONCAT_VECTORS;
+  bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts,
+                          SmallVectorImpl<Register> &UpdatedDefs) {
 
-    if (OpTy.isVector() && !DestTy.isVector())
-      return TargetOpcode::G_BUILD_VECTOR;
+    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
 
-    return TargetOpcode::G_MERGE_VALUES;
-  }
+    const unsigned CastOpc = CastMI.getOpcode();
 
-  bool tryCombineMerges(MachineInstr &MI,
-                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
-
-    if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+    if (!isArtifactCast(CastOpc))
       return false;
 
+    const unsigned NumDefs = MI.getNumOperands() - 1;
+
+    const Register CastSrcReg = CastMI.getOperand(1).getReg();
+    const LLT CastSrcTy = MRI.getType(CastSrcReg);
+    const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+    const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());
+
+    const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
+    const unsigned DestSize = DestTy.getSizeInBits();
+
+    if (CastOpc == TargetOpcode::G_TRUNC) {
+      if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
+        //  %1:_(<4 x s8>) = G_TRUNC %0(<4 x s32>)
+        //  %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %1
+        // =>
+        //  %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %0
+        //  %2:_(s8) = G_TRUNC %6
+        //  %3:_(s8) = G_TRUNC %7
+        //  %4:_(s8) = G_TRUNC %8
+        //  %5:_(s8) = G_TRUNC %9
+
+        unsigned UnmergeNumElts =
+            DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
+        LLT UnmergeTy = CastSrcTy.changeNumElements(UnmergeNumElts);
+
+        if (isInstUnsupported(
+                {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
+          return false;
+
+        Builder.setInstr(MI);
+        auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);
+
+        for (unsigned I = 0; I != NumDefs; ++I) {
+          Register DefReg = MI.getOperand(I).getReg();
+          UpdatedDefs.push_back(DefReg);
+          Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
+        }
+
+        markInstAndDefDead(MI, CastMI, DeadInsts);
+        return true;
+      }
+
+      if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
+        //  %1:_(s16) = G_TRUNC %0(s32)
+        //  %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1
+        // =>
+        //  %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0
+
+        // Unmerge(trunc) can be combined if the trunc source size is a multiple
+        // of the unmerge destination size
+        if (CastSrcSize % DestSize != 0)
+          return false;
+
+        // Check if the new unmerge is supported
+        if (isInstUnsupported(
+                {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
+          return false;
+
+        // Gather the original destination registers and create new ones for the
+        // unused bits
+        const unsigned NewNumDefs = CastSrcSize / DestSize;
+        SmallVector<Register, 8> DstRegs(NewNumDefs);
+        for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
+          if (Idx < NumDefs)
+            DstRegs[Idx] = MI.getOperand(Idx).getReg();
+          else
+            DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
+        }
+
+        // Build new unmerge
+        Builder.setInstr(MI);
+        Builder.buildUnmerge(DstRegs, CastSrcReg);
+        UpdatedDefs.append(DstRegs.begin(), DstRegs.begin() + NewNumDefs);
+        markInstAndDefDead(MI, CastMI, DeadInsts);
+        return true;
+      }
+    }
+
+    // TODO: support combines with other casts as well
+    return false;
+  }
+
+  static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
+                                 LLT OpTy, LLT DestTy) {
+    // Check if we found a definition that is like G_MERGE_VALUES.
+    switch (MergeOp) {
+    default:
+      return false;
+    case TargetOpcode::G_BUILD_VECTOR:
+    case TargetOpcode::G_MERGE_VALUES:
+      // The convert operation that we will need to insert is
+      // going to convert the input of that type of instruction (scalar)
+      // to the destination type (DestTy).
+      // The conversion needs to stay in the same domain (scalar to scalar
+      // and vector to vector), so if we were to allow to fold the merge
+      // we would need to insert some bitcasts.
+      // E.g.,
+      // <2 x s16> = build_vector s16, s16
+      // <2 x s32> = zext <2 x s16>
+      // <2 x s16>, <2 x s16> = unmerge <2 x s32>
+      //
+      // As-is, the folding would produce:
+      // <2 x s16> = zext s16  <-- scalar to vector
+      // <2 x s16> = zext s16  <-- scalar to vector
+      // Which is invalid.
+      // Instead we would want to generate:
+      // s32 = zext s16
+      // <2 x s16> = bitcast s32
+      // s32 = zext s16
+      // <2 x s16> = bitcast s32
+      //
+      // That is not done yet.
+      if (ConvertOp == 0)
+        return true;
+      return !DestTy.isVector() && OpTy.isVector();
+    case TargetOpcode::G_CONCAT_VECTORS: {
+      if (ConvertOp == 0)
+        return true;
+      if (!DestTy.isVector())
+        return false;
+
+      const unsigned OpEltSize = OpTy.getElementType().getSizeInBits();
+
+      // Don't handle scalarization with a cast that isn't in the same
+      // direction as the vector cast. This could be handled, but it would
+      // require more intermediate unmerges.
+      if (ConvertOp == TargetOpcode::G_TRUNC)
+        return DestTy.getSizeInBits() <= OpEltSize;
+      return DestTy.getSizeInBits() >= OpEltSize;
+    }
+    }
+  }
+
+  /// Try to replace DstReg with SrcReg or build a COPY instruction
+  /// depending on the register constraints.
+  static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
+                                    MachineRegisterInfo &MRI,
+                                    MachineIRBuilder &Builder,
+                                    SmallVectorImpl<Register> &UpdatedDefs,
+                                    GISelChangeObserver &Observer) {
+    if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
+      Builder.buildCopy(DstReg, SrcReg);
+      UpdatedDefs.push_back(DstReg);
+      return;
+    }
+    SmallVector<MachineInstr *, 4> UseMIs;
+    // Get the users and notify the observer before replacing.
+    for (auto &UseMI : MRI.use_instructions(DstReg)) {
+      UseMIs.push_back(&UseMI);
+      Observer.changingInstr(UseMI);
+    }
+    // Replace the registers.
+    MRI.replaceRegWith(DstReg, SrcReg);
+    UpdatedDefs.push_back(SrcReg);
+    // Notify the observer that we changed the instructions.
+    for (auto *UseMI : UseMIs)
+      Observer.changedInstr(*UseMI);
+  }
+
+  /// Return the operand index in \p MI that defines \p Def
+  static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
+    unsigned DefIdx = 0;
+    for (const MachineOperand &Def : MI.defs()) {
+      if (Def.getReg() == SearchDef)
+        break;
+      ++DefIdx;
+    }
+
+    return DefIdx;
+  }
+
+  bool tryCombineUnmergeValues(MachineInstr &MI,
+                               SmallVectorImpl<MachineInstr *> &DeadInsts,
+                               SmallVectorImpl<Register> &UpdatedDefs,
+                               GISelChangeObserver &Observer) {
+    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
+
     unsigned NumDefs = MI.getNumOperands() - 1;
-    MachineInstr *SrcDef =
-        getDefIgnoringCopies(MI.getOperand(NumDefs).getReg(), MRI);
+    Register SrcReg = MI.getOperand(NumDefs).getReg();
+    MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
     if (!SrcDef)
       return false;
 
     LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
     LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+
+    if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
+      // %0:_(<4 x s16>) = G_FOO
+      // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
+      // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
+      //
+      // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
+      const unsigned NumSrcOps = SrcDef->getNumOperands();
+      Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
+      LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
+
+      // If we need to decrease the number of vector elements in the result type
+      // of an unmerge, this would involve the creation of an equivalent unmerge
+      // to copy back to the original result registers.
+      LegalizeActionStep ActionStep = LI.getAction(
+          {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
+      switch (ActionStep.Action) {
+      case LegalizeActions::Lower:
+      case LegalizeActions::Unsupported:
+        break;
+      case LegalizeActions::FewerElements:
+      case LegalizeActions::NarrowScalar:
+        if (ActionStep.TypeIdx == 1)
+          return false;
+        break;
+      default:
+        return false;
+      }
+
+      Builder.setInstrAndDebugLoc(MI);
+      auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
+
+      // TODO: Should we try to process out the other defs now? If the other
+      // defs of the source unmerge are also unmerged, we end up with a separate
+      // unmerge for each one.
+      unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
+      for (unsigned I = 0; I != NumDefs; ++I) {
+        Register Def = MI.getOperand(I).getReg();
+        replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
+                              MRI, Builder, UpdatedDefs, Observer);
+      }
+
+      markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
+      return true;
+    }
+
     MachineInstr *MergeI = SrcDef;
     unsigned ConvertOp = 0;
 
@@ -237,16 +588,17 @@
       MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
     }
 
-    // FIXME: Handle scalarizing concat_vectors (scalar result type with vector
-    // source)
-    unsigned MergingOpcode = getMergeOpcode(OpTy, DestTy);
-    if (!MergeI || MergeI->getOpcode() != MergingOpcode)
-      return false;
+    if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
+                                       ConvertOp, OpTy, DestTy)) {
+      // We might have a chance to combine later by trying to combine
+      // unmerge(cast) first.
+      return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
+    }
 
     const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
 
     if (NumMergeRegs < NumDefs) {
-      if (ConvertOp != 0 || NumDefs % NumMergeRegs != 0)
+      if (NumDefs % NumMergeRegs != 0)
         return false;
 
       Builder.setInstr(MI);
@@ -259,12 +611,45 @@
 
       const unsigned NewNumDefs = NumDefs / NumMergeRegs;
       for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
-        SmallVector<Register, 2> DstRegs;
+        SmallVector<Register, 8> DstRegs;
         for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
              ++j, ++DefIdx)
           DstRegs.push_back(MI.getOperand(DefIdx).getReg());
 
-        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+        if (ConvertOp) {
+          LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+
+          // This is a vector that is being split and cast. Extract to the
+          // element type, and do the conversion on the scalars (or smaller
+          // vectors).
+          LLT MergeEltTy = MergeSrcTy.divide(NewNumDefs);
+
+          // Handle split to smaller vectors, with conversions.
+          // %2(<8 x s8>) = G_CONCAT_VECTORS %0(<4 x s8>), %1(<4 x s8>)
+          // %3(<8 x s16>) = G_SEXT %2
+          // %4(<2 x s16>), %5(<2 x s16>), %6(<2 x s16>), %7(<2 x s16>) = G_UNMERGE_VALUES %3
+          //
+          // =>
+          //
+          // %8(<2 x s8>), %9(<2 x s8>) = G_UNMERGE_VALUES %0
+          // %10(<2 x s8>), %11(<2 x s8>) = G_UNMERGE_VALUES %1
+          // %4(<2 x s16>) = G_SEXT %8
+          // %5(<2 x s16>) = G_SEXT %9
+          // %6(<2 x s16>) = G_SEXT %10
+          // %7(<2 x s16>) = G_SEXT %11
+
+          SmallVector<Register, 4> TmpRegs(NewNumDefs);
+          for (unsigned k = 0; k < NewNumDefs; ++k)
+            TmpRegs[k] = MRI.createGenericVirtualRegister(MergeEltTy);
+
+          Builder.buildUnmerge(TmpRegs, MergeI->getOperand(Idx + 1).getReg());
+
+          for (unsigned k = 0; k < NewNumDefs; ++k)
+            Builder.buildInstr(ConvertOp, {DstRegs[k]}, {TmpRegs[k]});
+        } else {
+          Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+        }
+        UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
       }
 
     } else if (NumMergeRegs > NumDefs) {
@@ -281,36 +666,47 @@
 
       const unsigned NumRegs = NumMergeRegs / NumDefs;
       for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
-        SmallVector<Register, 2> Regs;
+        SmallVector<Register, 8> Regs;
         for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
              ++j, ++Idx)
           Regs.push_back(MergeI->getOperand(Idx).getReg());
 
-        Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+        Register DefReg = MI.getOperand(DefIdx).getReg();
+        Builder.buildMerge(DefReg, Regs);
+        UpdatedDefs.push_back(DefReg);
       }
 
     } else {
       LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+
+      if (!ConvertOp && DestTy != MergeSrcTy)
+        ConvertOp = TargetOpcode::G_BITCAST;
+
       if (ConvertOp) {
         Builder.setInstr(MI);
 
         for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
           Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
-          Builder.buildInstr(ConvertOp, {MI.getOperand(Idx).getReg()},
-                             {MergeSrc});
+          Register DefReg = MI.getOperand(Idx).getReg();
+          Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
+          UpdatedDefs.push_back(DefReg);
         }
 
         markInstAndDefDead(MI, *MergeI, DeadInsts);
         return true;
       }
-      // FIXME: is a COPY appropriate if the types mismatch? We know both
-      // registers are allocatable by now.
-      if (DestTy != MergeSrcTy)
-        return false;
 
-      for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
-        MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
-                           MergeI->getOperand(Idx + 1).getReg());
+      assert(DestTy == MergeSrcTy &&
+             "Bitcast and the other kinds of conversions should "
+             "have happened earlier");
+
+      Builder.setInstr(MI);
+      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+        Register DstReg = MI.getOperand(Idx).getReg();
+        Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
+        replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
+                              Observer);
+      }
     }
 
     markInstAndDefDead(MI, *MergeI, DeadInsts);
@@ -329,7 +725,8 @@
   }
 
   bool tryCombineExtract(MachineInstr &MI,
-                         SmallVectorImpl<MachineInstr *> &DeadInsts) {
+                         SmallVectorImpl<MachineInstr *> &DeadInsts,
+                         SmallVectorImpl<Register> &UpdatedDefs) {
     assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);
 
     // Try to use the source registers from a G_MERGE_VALUES
@@ -344,13 +741,14 @@
     // for N >= %2.getSizeInBits() / 2
     //    %3 = G_EXTRACT %1, (N - %0.getSizeInBits())
 
-    unsigned Src = lookThroughCopyInstrs(MI.getOperand(1).getReg());
-    MachineInstr *MergeI = MRI.getVRegDef(Src);
+    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
     if (!MergeI || !isMergeLikeOpcode(MergeI->getOpcode()))
       return false;
 
-    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
-    LLT SrcTy = MRI.getType(Src);
+    Register DstReg = MI.getOperand(0).getReg();
+    LLT DstTy = MRI.getType(DstReg);
+    LLT SrcTy = MRI.getType(SrcReg);
 
     // TODO: Do we need to check if the resulting extract is supported?
     unsigned ExtractDstSize = DstTy.getSizeInBits();
@@ -368,10 +766,9 @@
 
     // TODO: We could modify MI in place in most cases.
     Builder.setInstr(MI);
-    Builder.buildExtract(
-      MI.getOperand(0).getReg(),
-      MergeI->getOperand(MergeSrcIdx + 1).getReg(),
-      Offset - MergeSrcIdx * MergeSrcSize);
+    Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
+                         Offset - MergeSrcIdx * MergeSrcSize);
+    UpdatedDefs.push_back(DstReg);
     markInstAndDefDead(MI, *MergeI, DeadInsts);
     return true;
   }
@@ -388,55 +785,117 @@
     // etc, process the dead instructions now if any.
     if (!DeadInsts.empty())
       deleteMarkedDeadInsts(DeadInsts, WrapperObserver);
+
+    // Collect here every vreg that was redefined in such a way that at least
+    // one of its users (immediate or COPY-separated) could become combinable
+    // as an artifact with the new definition (or with the instruction
+    // reachable from it through a chain of copies, if any).
+    SmallVector<Register, 4> UpdatedDefs;
+    bool Changed = false;
     switch (MI.getOpcode()) {
     default:
       return false;
     case TargetOpcode::G_ANYEXT:
-      return tryCombineAnyExt(MI, DeadInsts);
+      Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs);
+      break;
     case TargetOpcode::G_ZEXT:
-      return tryCombineZExt(MI, DeadInsts);
+      Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+      break;
     case TargetOpcode::G_SEXT:
-      return tryCombineSExt(MI, DeadInsts);
+      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
+      break;
     case TargetOpcode::G_UNMERGE_VALUES:
-      return tryCombineMerges(MI, DeadInsts);
+      Changed =
+          tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+      break;
+    case TargetOpcode::G_MERGE_VALUES:
+    case TargetOpcode::G_BUILD_VECTOR:
+    case TargetOpcode::G_CONCAT_VECTORS:
+      // If any user of this merge is an unmerge or a trunc, then add the
+      // defined register to the artifact worklist in case there's folding
+      // that can be done looking up.
+      for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
+        if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
+            U.getOpcode() == TargetOpcode::G_TRUNC) {
+          UpdatedDefs.push_back(MI.getOperand(0).getReg());
+          break;
+        }
+      }
+      break;
     case TargetOpcode::G_EXTRACT:
-      return tryCombineExtract(MI, DeadInsts);
-    case TargetOpcode::G_TRUNC: {
-      bool Changed = false;
-      for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
-        Changed |= tryCombineInstruction(Use, DeadInsts, WrapperObserver);
-      return Changed;
+      Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
+      break;
+    case TargetOpcode::G_TRUNC:
+      Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+      if (!Changed) {
+        // Try to combine truncates away even if they are legal. As all artifact
+        // combines at the moment look only "up" the def-use chains, we achieve
+        // that by throwing truncates' users (looking through copies) into the
+        // ArtifactList again.
+        UpdatedDefs.push_back(MI.getOperand(0).getReg());
+      }
+      break;
     }
+    // If the main loop through the ArtifactList found at least one combinable
+    // pair of artifacts, not only combine it away (as done above), but also
+    // follow the def-use chain from there to combine everything that can be
+    // combined within this def-use chain of artifacts.
+    while (!UpdatedDefs.empty()) {
+      Register NewDef = UpdatedDefs.pop_back_val();
+      assert(NewDef.isVirtual() && "Unexpected redefinition of a physreg");
+      for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
+        switch (Use.getOpcode()) {
+        // Keep this list in sync with the list of all artifact combines.
+        case TargetOpcode::G_ANYEXT:
+        case TargetOpcode::G_ZEXT:
+        case TargetOpcode::G_SEXT:
+        case TargetOpcode::G_UNMERGE_VALUES:
+        case TargetOpcode::G_EXTRACT:
+        case TargetOpcode::G_TRUNC:
+          // Adding Use to ArtifactList.
+          WrapperObserver.changedInstr(Use);
+          break;
+        case TargetOpcode::COPY: {
+          Register Copy = Use.getOperand(0).getReg();
+          if (Copy.isVirtual())
+            UpdatedDefs.push_back(Copy);
+          break;
+        }
+        default:
+          // If we do not have an artifact combine for the opcode, there is no
+          // point in adding it to the ArtifactList as nothing interesting will
+          // be done to it anyway.
+          break;
+        }
+      }
     }
+    return Changed;
   }
 
 private:
-
-  static unsigned getArtifactSrcReg(const MachineInstr &MI) {
+  static Register getArtifactSrcReg(const MachineInstr &MI) {
     switch (MI.getOpcode()) {
     case TargetOpcode::COPY:
     case TargetOpcode::G_TRUNC:
     case TargetOpcode::G_ZEXT:
     case TargetOpcode::G_ANYEXT:
     case TargetOpcode::G_SEXT:
-    case TargetOpcode::G_UNMERGE_VALUES:
-      return MI.getOperand(MI.getNumOperands() - 1).getReg();
     case TargetOpcode::G_EXTRACT:
       return MI.getOperand(1).getReg();
+    case TargetOpcode::G_UNMERGE_VALUES:
+      return MI.getOperand(MI.getNumOperands() - 1).getReg();
     default:
       llvm_unreachable("Not a legalization artifact happen");
     }
   }
 
-  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
-  /// dead due to MI being killed, then mark DefMI as dead too.
-  /// Some of the combines (extends(trunc)), try to walk through redundant
-  /// copies in between the extends and the truncs, and this attempts to collect
-  /// the in between copies if they're dead.
-  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
-                          SmallVectorImpl<MachineInstr *> &DeadInsts) {
-    DeadInsts.push_back(&MI);
-
+  /// Mark a def of one of MI's original operands, DefMI, as dead if changing MI
+  /// (either by killing it or changing operands) results in DefMI being dead
+  /// too. In-between COPYs or artifact-casts are also collected if they are
+  /// dead.
+  /// MI is not marked dead.
+  void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
+                   SmallVectorImpl<MachineInstr *> &DeadInsts,
+                   unsigned DefIdx = 0) {
     // Collect all the copy instructions that are made dead due to deleting
     // this instruction. Collect all of them until the Trunc(DefMI).
     // Eg,
@@ -448,7 +907,7 @@
     // and as a result, %3, %2, %1 are dead.
     MachineInstr *PrevMI = &MI;
     while (PrevMI != &DefMI) {
-      unsigned PrevRegSrc = getArtifactSrcReg(*PrevMI);
+      Register PrevRegSrc = getArtifactSrcReg(*PrevMI);
 
       MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
       if (MRI.hasOneUse(PrevRegSrc)) {
@@ -463,8 +922,39 @@
         break;
       PrevMI = TmpDef;
     }
-    if (PrevMI == &DefMI && MRI.hasOneUse(DefMI.getOperand(0).getReg()))
-      DeadInsts.push_back(&DefMI);
+
+    if (PrevMI == &DefMI) {
+      unsigned I = 0;
+      bool IsDead = true;
+      for (MachineOperand &Def : DefMI.defs()) {
+        if (I != DefIdx) {
+          if (!MRI.use_empty(Def.getReg())) {
+            IsDead = false;
+            break;
+          }
+        } else {
+          if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
+            break;
+        }
+
+        ++I;
+      }
+
+      if (IsDead)
+        DeadInsts.push_back(&DefMI);
+    }
+  }
+
+  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
+  /// dead due to MI being killed, then mark DefMI as dead too.
+  /// Some of the combines (extends(trunc)) try to walk through redundant
+  /// copies between the extends and the truncs, and this attempts to collect
+  /// the in-between copies if they're dead.
+  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
+                          SmallVectorImpl<MachineInstr *> &DeadInsts,
+                          unsigned DefIdx = 0) {
+    DeadInsts.push_back(&MI);
+    markDefDead(MI, DefMI, DeadInsts, DefIdx);
   }
 
   /// Erase the dead instructions in the list and call the observer hooks.
@@ -506,7 +996,7 @@
 
   /// Looks through copy instructions and returns the actual
   /// source register.
-  unsigned lookThroughCopyInstrs(Register Reg) {
+  Register lookThroughCopyInstrs(Register Reg) {
     Register TmpReg;
     while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
       if (MRI.getType(TmpReg).isValid())
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
index 13cf3f7..690e84f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -26,13 +26,18 @@
 namespace llvm {
 
 class MachineRegisterInfo;
+class LostDebugLocObserver;
 
 class Legalizer : public MachineFunctionPass {
 public:
   static char ID;
 
-private:
+  struct MFResult {
+    bool Changed;
+    const MachineInstr *FailedOn;
+  };
 
+private:
   /// Initialize the field members using \p MF.
   void init(MachineFunction &MF);
 
@@ -55,14 +60,17 @@
   }
 
   MachineFunctionProperties getClearedProperties() const override {
-    return MachineFunctionProperties()
-      .set(MachineFunctionProperties::Property::NoPHIs);
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoPHIs);
   }
 
-  bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
-                       const TargetInstrInfo &TII);
-
   bool runOnMachineFunction(MachineFunction &MF) override;
+
+  static MFResult
+  legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
+                          ArrayRef<GISelChangeObserver *> AuxObservers,
+                          LostDebugLocObserver &LocObserver,
+                          MachineIRBuilder &MIRBuilder);
 };
 } // End namespace llvm.
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index e8cb65f..2e9c7d8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -32,9 +32,23 @@
 class Legalizer;
 class MachineRegisterInfo;
 class GISelChangeObserver;
+class TargetLowering;
 
 class LegalizerHelper {
 public:
+  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
+  /// functions
+  MachineIRBuilder &MIRBuilder;
+
+  /// To keep track of changes made by the LegalizerHelper.
+  GISelChangeObserver &Observer;
+
+private:
+  MachineRegisterInfo &MRI;
+  const LegalizerInfo &LI;
+  const TargetLowering &TLI;
+
+public:
   enum LegalizeResult {
     /// Instruction was already legal and no change was made to the
     /// MachineFunction.
@@ -48,6 +62,10 @@
     UnableToLegalize,
   };
 
+  /// Expose LegalizerInfo so clients can reuse it.
+  const LegalizerInfo &getLegalizerInfo() const { return LI; }
+  const TargetLowering &getTargetLowering() const { return TLI; }
+
   LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
                   MachineIRBuilder &B);
   LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
@@ -74,6 +92,9 @@
   /// precision, ignoring the unused bits).
   LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
 
+  /// Legalize an instruction by replacing the value type
+  LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
   /// Legalize an instruction by splitting it into simpler parts, hopefully
   /// understood by the target.
   LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
@@ -88,14 +109,13 @@
   LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                     LLT MoreTy);
 
-  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
-  /// functions
-  MachineIRBuilder &MIRBuilder;
+  /// Cast the given value to an LLT::scalar with an equivalent size. Returns
+  /// the register to use if an instruction was inserted. Returns the original
+  /// register if no coercion was necessary.
+  ///
+  /// This may also fail and return Register() if there is no legal way to cast.
+  Register coerceToScalar(Register Val);
 
-  /// Expose LegalizerInfo so the clients can re-use.
-  const LegalizerInfo &getLegalizerInfo() const { return LI; }
-
-private:
   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
   /// Use by extending the operand's type to \p WideTy using the specified \p
   /// ExtOpcode for the extension instruction, and replacing the vreg of the
@@ -129,6 +149,19 @@
   /// original vector type, and replacing the vreg of the operand in place.
   void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);
 
+  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+  /// use by inserting a G_BITCAST to \p CastTy
+  void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
+
+  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+  /// def by inserting a G_BITCAST from \p CastTy
+  void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
+
+  /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
+  /// G_IMPLICIT_DEF, and producing dead results.
+  Register widenWithUnmerge(LLT WideTy, Register OrigReg);
+
+private:
   LegalizeResult
   widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
   LegalizeResult
@@ -137,6 +170,8 @@
   widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
   LegalizeResult
   widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
+  LegalizeResult
+  widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
 
   /// Helper function to split a wide generic register into bitwise blocks with
   /// the given Type (which implies the number of blocks needed). The generic
@@ -163,6 +198,44 @@
                    LLT PartTy, ArrayRef<Register> PartRegs,
                    LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
 
+  /// Unmerge \p SrcReg into smaller-sized values, and append them to \p
+  /// Parts. The elements of \p Parts will have the greatest common divisor type
+  /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
+  /// return the GCD type.
+  LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
+                     LLT NarrowTy, Register SrcReg);
+
+  /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
+  /// the unpacked registers to \p Parts. This version is if the common unmerge
+  /// type is already known.
+  void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
+                      Register SrcReg);
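+  // A worked sketch of the GCD-type rule above (illustrative numbers, not
+  // part of the original header): the piece type is driven by a plain
+  // bit-width GCD,
+  //   unsigned gcdBits(unsigned A, unsigned B) {
+  //     while (B) { unsigned T = A % B; A = B; B = T; }
+  //     return A;
+  //   }
+  // so a v2s32 DstTy (64 bits), a v3s32 NarrowTy (96 bits) and a v4s32
+  // source (128 bits) share the GCD type s32.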
+
+  /// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
+  /// from the least common multiple type, and convert as appropriate to \p
+  /// DstReg.
+  ///
+  /// \p VRegs should each have type \p GCDTy. This type should be the greatest
+  /// common divisor type of \p DstReg, \p NarrowTy, and an undetermined source
+  /// type.
+  ///
+  /// \p NarrowTy is the desired result merge source type. If the source value
+  /// needs to be widened to evenly cover \p DstReg, inserts high bits
+  /// corresponding to the extension opcode \p PadStrategy.
+  ///
+  /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
+  /// will replace it. Returns the complete LCMTy that \p VRegs will cover when
+  /// merged.
+  LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
+                          SmallVectorImpl<Register> &VRegs,
+                          unsigned PadStrategy = TargetOpcode::G_ANYEXT);
+
+  /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
+  /// low bits into \p DstReg. This is intended to use the outputs from
+  /// buildLCMMergePieces after processing.
+  void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
+                                ArrayRef<Register> RemergeRegs);
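+  // A worked sketch of the LCM merge above (illustrative types, not from the
+  // original header): for an s48 destination and an s32 NarrowTy, the LCM
+  // type is s96. The three s16 GCD pieces of the source are padded with three
+  // more s16 values per \p PadStrategy, regrouped into three s32 registers,
+  // merged to s96, and buildWidenedRemergeToDst then extracts the low 48
+  // bits into the destination.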
+
   /// Perform generic multiplication of values held in multiple registers.
   /// Generated instructions use only types NarrowTy and i1.
   /// Destination can be same or two times size of the source.
@@ -170,14 +243,26 @@
                          ArrayRef<Register> Src1Regs,
                          ArrayRef<Register> Src2Regs, LLT NarrowTy);
 
+  void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
+
+public:
+  /// Return the alignment to use for a stack temporary object with the given
+  /// type.
+  Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
+
+  /// Create a stack temporary based on the size in bytes and the alignment
+  MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
+                                           MachinePointerInfo &PtrInfo);
+
+  /// Get a pointer to vector element \p Index located in memory for a vector of
+  /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
+  /// of bounds the returned pointer is unspecified, but will be within the
+  /// vector bounds.
+  Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
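+  // One way the in-bounds guarantee can be met (a sketch assuming a
+  // power-of-two element count, not necessarily this implementation):
+  //   auto Mask = MIRBuilder.buildConstant(IdxTy, NumElts - 1);
+  //   Index = MIRBuilder.buildAnd(IdxTy, Index, Mask).getReg(0);
+  // which clamps a wild index into [0, NumElts) before it is scaled by the
+  // element size to form the pointer offset.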
+
   LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
                                                 unsigned TypeIdx, LLT NarrowTy);
 
-  /// Legalize a simple vector instruction where all operands are the same type
-  /// by splitting into multiple components.
-  LegalizeResult fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
-                                          LLT NarrowTy);
-
   /// Legalize an instruction with a vector type where each operand may have a
   /// different element type. All type indexes must have the same number of
   /// elements.
@@ -199,9 +284,31 @@
   LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
                                        LLT MoreTy);
 
+  LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
+                                                  unsigned TypeIdx,
+                                                  LLT NarrowTy);
+  LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
+                                          LLT NarrowTy);
+  LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
+                                                           unsigned TypeIdx,
+                                                           LLT NarrowTy);
+
   LegalizeResult
   reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
+  /// Legalize an instruction by reducing the operation width, either by
+  /// narrowing the type of the operation or by reducing the number of elements
+  /// of a vector.
+  /// The strategy used (narrow vs. fewerElements) is decided by \p NarrowTy:
+  /// narrow is used if the scalar types of \p NarrowTy and \p DstTy differ;
+  /// fewerElements is used when the scalar type is the same but the number of
+  /// elements between \p NarrowTy and \p DstTy differs.
+  LegalizeResult reduceOperationWidth(MachineInstr &MI, unsigned TypeIdx,
+                                      LLT NarrowTy);
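+  // Roughly (an illustrative sketch, not the exact implementation):
+  //   if (NarrowTy.getScalarType() == DstTy.getScalarType())
+  //     return fewerElementsVector(MI, TypeIdx, NarrowTy); // split elements
+  //   return narrowScalar(MI, TypeIdx, NarrowTy);          // shrink the type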
+
+  LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
+                                              LLT NarrowTy);
+
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 
@@ -211,28 +318,78 @@
   LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
   LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
   LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
-  LegalizeResult lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
+  LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
+                                         LLT CastTy);
+
+  /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
+  LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
+                                        LLT CastTy);
+
+  LegalizeResult lowerBitcast(MachineInstr &MI);
+  LegalizeResult lowerLoad(MachineInstr &MI);
+  LegalizeResult lowerStore(MachineInstr &MI);
+  LegalizeResult lowerBitCount(MachineInstr &MI);
 
   LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
-  LegalizeResult lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
-  LegalizeResult lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
-  LegalizeResult lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
-  LegalizeResult lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult lowerUITOFP(MachineInstr &MI);
+  LegalizeResult lowerSITOFP(MachineInstr &MI);
+  LegalizeResult lowerFPTOUI(MachineInstr &MI);
+  LegalizeResult lowerFPTOSI(MachineInstr &MI);
 
-  MachineRegisterInfo &MRI;
-  const LegalizerInfo &LI;
-  /// To keep track of changes made by the LegalizerHelper.
-  GISelChangeObserver &Observer;
+  LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
+  LegalizeResult lowerFPTRUNC(MachineInstr &MI);
+  LegalizeResult lowerFPOWI(MachineInstr &MI);
+
+  LegalizeResult lowerMinMax(MachineInstr &MI);
+  LegalizeResult lowerFCopySign(MachineInstr &MI);
+  LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
+  LegalizeResult lowerFMad(MachineInstr &MI);
+  LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
+  LegalizeResult lowerFFloor(MachineInstr &MI);
+  LegalizeResult lowerMergeValues(MachineInstr &MI);
+  LegalizeResult lowerUnmergeValues(MachineInstr &MI);
+  LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
+  LegalizeResult lowerShuffleVector(MachineInstr &MI);
+  LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
+  LegalizeResult lowerExtract(MachineInstr &MI);
+  LegalizeResult lowerInsert(MachineInstr &MI);
+  LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
+  LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
+  LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
+  LegalizeResult lowerShlSat(MachineInstr &MI);
+  LegalizeResult lowerBswap(MachineInstr &MI);
+  LegalizeResult lowerBitreverse(MachineInstr &MI);
+  LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
+  LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
+  LegalizeResult lowerSelect(MachineInstr &MI);
 };
 
+/// Helper function that creates a libcall to the given \p Name using the given
+/// calling convention \p CC.
+LegalizerHelper::LegalizeResult
+createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
+              const CallLowering::ArgInfo &Result,
+              ArrayRef<CallLowering::ArgInfo> Args, CallingConv::ID CC);
+
 /// Helper function that creates the given libcall.
 LegalizerHelper::LegalizeResult
 createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
               const CallLowering::ArgInfo &Result,
               ArrayRef<CallLowering::ArgInfo> Args);
 
+/// Create a libcall to memcpy et al.
+LegalizerHelper::LegalizeResult createMemLibcall(MachineIRBuilder &MIRBuilder,
+                                                 MachineRegisterInfo &MRI,
+                                                 MachineInstr &MI);
+
 } // End namespace llvm.
 
 #endif
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 513c98f..c8a54b8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -22,8 +22,9 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <cstdint>
 #include <tuple>
@@ -34,6 +35,7 @@
 
 extern cl::opt<bool> DisableGISelLegalityCheck;
 
+class LegalizerHelper;
 class MachineInstr;
 class MachineIRBuilder;
 class MachineRegisterInfo;
@@ -67,6 +69,9 @@
   /// the first two results.
   MoreElements,
 
+  /// Perform the operation on a different, but equivalently sized type.
+  Bitcast,
+
   /// The operation itself must be expressed in terms of simpler actions on
   /// this target. E.g. a SREM replaced by an SDIV and subtraction.
   Lower,
@@ -152,7 +157,7 @@
   LLT NewType;
 
   LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
-                     const LLT &NewType)
+                     const LLT NewType)
       : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}
 
   bool operator==(const LegalizeActionStep &RHS) const {
@@ -178,7 +183,7 @@
            MemSize == Other.MemSize;
   }
 
-  /// \returns true if this memory access is legal with for the acecss described
+  /// \returns true if this memory access is legal for the access described
   /// by \p Other (The alignment is sufficient for the size and result type).
   bool isCompatible(const TypePairAndMemDesc &Other) const {
     return Type0 == Other.Type0 && Type1 == Other.Type1 &&
@@ -199,11 +204,33 @@
 Predicate all(Predicate P0, Predicate P1, Args... args) {
   return all(all(P0, P1), args...);
 }
-/// True iff the given type index is the specified types.
+
+/// True iff P0 or P1 are true.
+template<typename Predicate>
+Predicate any(Predicate P0, Predicate P1) {
+  return [=](const LegalityQuery &Query) {
+    return P0(Query) || P1(Query);
+  };
+}
+/// True iff any of the given predicates are true.
+template<typename Predicate, typename... Args>
+Predicate any(Predicate P0, Predicate P1, Args... args) {
+  return any(any(P0, P1), args...);
+}
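+// Usage sketch (types illustrative): accept either a 32- or 64-bit scalar at
+// type index 0, e.g.
+//   any(typeIs(0, LLT::scalar(32)), typeIs(0, LLT::scalar(64)))
+// and more predicates can be chained through the variadic overload.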
+
+/// True iff the given type index is the specified type.
 LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
 /// True iff the given type index is one of the specified types.
 LegalityPredicate typeInSet(unsigned TypeIdx,
                             std::initializer_list<LLT> TypesInit);
+
+/// True iff the given type index is not the specified type.
+inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
+  return [=](const LegalityQuery &Query) {
+           return Query.Types[TypeIdx] != Type;
+         };
+}
+
 /// True iff the given types for the given pair of type indexes is one of the
 /// specified type pairs.
 LegalityPredicate
@@ -224,13 +251,16 @@
 /// space.
 LegalityPredicate isPointer(unsigned TypeIdx, unsigned AddrSpace);
 
+/// True if the type index is a vector with element type \p EltTy
+LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT EltTy);
+
 /// True iff the specified type index is a scalar that's narrower than the given
 /// size.
-LegalityPredicate narrowerThan(unsigned TypeIdx, unsigned Size);
+LegalityPredicate scalarNarrowerThan(unsigned TypeIdx, unsigned Size);
 
 /// True iff the specified type index is a scalar that's wider than the given
 /// size.
-LegalityPredicate widerThan(unsigned TypeIdx, unsigned Size);
+LegalityPredicate scalarWiderThan(unsigned TypeIdx, unsigned Size);
 
 /// True iff the specified type index is a scalar or vector with an element type
 /// that's narrower than the given size.
@@ -248,8 +278,20 @@
 /// is not a power of 2.
 LegalityPredicate scalarOrEltSizeNotPow2(unsigned TypeIdx);
 
+/// True if the total bitwidth of the specified type index is \p Size bits.
+LegalityPredicate sizeIs(unsigned TypeIdx, unsigned Size);
+
 /// True iff the specified type indices are both the same bit size.
 LegalityPredicate sameSize(unsigned TypeIdx0, unsigned TypeIdx1);
+
+/// True iff the first type index has a larger total bit size than the second
+/// type index.
+LegalityPredicate largerThan(unsigned TypeIdx0, unsigned TypeIdx1);
+
+/// True iff the first type index has a smaller total bit size than the second
+/// type index.
+LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1);
+
 /// True iff the specified MMO index has a size that is not a power of 2
 LegalityPredicate memSizeInBytesNotPow2(unsigned MMOIdx);
 /// True iff the specified type index is a vector whose element count is not a
@@ -274,6 +316,11 @@
 /// Keep the same scalar or element type as the given type.
 LegalizeMutation changeElementTo(unsigned TypeIdx, LLT Ty);
 
+/// Change the scalar size or element size to have the same scalar size as type
+/// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
+/// only changes the size.
+LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
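+// e.g. (a sketch): with a 64-bit p0 at type index 0 and an s32 at index 1,
+// changeElementSizeTo(1, 0) mutates index 1 to s64, whereas changeElementTo
+// with the same indices would have produced the pointer type p0 itself.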
+
 /// Widen the scalar type or vector element type for the given type index to the
 /// next power of 2.
 LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min = 0);
@@ -331,6 +378,8 @@
   /// individually handled.
   SmallBitVector TypeIdxsCovered{MCOI::OPERAND_LAST_GENERIC -
                                  MCOI::OPERAND_FIRST_GENERIC + 2};
+  SmallBitVector ImmIdxsCovered{MCOI::OPERAND_LAST_GENERIC_IMM -
+                                MCOI::OPERAND_FIRST_GENERIC_IMM + 2};
 #endif
 
   unsigned typeIdx(unsigned TypeIdx) {
@@ -342,9 +391,21 @@
 #endif
     return TypeIdx;
   }
-  void markAllTypeIdxsAsCovered() {
+
+  unsigned immIdx(unsigned ImmIdx) {
+    assert(ImmIdx <= (MCOI::OPERAND_LAST_GENERIC_IMM -
+                      MCOI::OPERAND_FIRST_GENERIC_IMM) &&
+           "Imm Index is out of bounds");
+#ifndef NDEBUG
+    ImmIdxsCovered.set(ImmIdx);
+#endif
+    return ImmIdx;
+  }
+
+  void markAllIdxsAsCovered() {
 #ifndef NDEBUG
     TypeIdxsCovered.set();
+    ImmIdxsCovered.set();
 #endif
   }
 
@@ -403,6 +464,23 @@
     return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types),
                     Mutation);
   }
+  /// Use the given action when type index 0 is any type in the given list and
+  /// imm index 0 is anything. Action should not be an action that requires
+  /// mutation.
+  LegalizeRuleSet &actionForTypeWithAnyImm(LegalizeAction Action,
+                                           std::initializer_list<LLT> Types) {
+    using namespace LegalityPredicates;
+    immIdx(0); // Inform verifier imm idx 0 is handled.
+    return actionIf(Action, typeInSet(typeIdx(0), Types));
+  }
+
+  LegalizeRuleSet &actionForTypeWithAnyImm(
+    LegalizeAction Action, std::initializer_list<std::pair<LLT, LLT>> Types) {
+    using namespace LegalityPredicates;
+    immIdx(0); // Inform verifier imm idx 0 is handled.
+    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
+  }
+
   /// Use the given action when type indexes 0 and 1 are both in the given list.
   /// That is, the type pair is in the cartesian product of the list.
   /// Action should not be an action that requires mutation.
@@ -454,7 +532,7 @@
   LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
     // We have no choice but conservatively assume that the free-form
     // user-provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Legal, Predicate);
   }
   /// The instruction is legal when type index 0 is any type in the given list.
@@ -466,6 +544,19 @@
   LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
     return actionFor(LegalizeAction::Legal, Types);
   }
+  /// The instruction is legal when type index 0 is any type in the given list
+  /// and imm index 0 is anything.
+  LegalizeRuleSet &legalForTypeWithAnyImm(std::initializer_list<LLT> Types) {
+    markAllIdxsAsCovered();
+    return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
+  }
+
+  LegalizeRuleSet &legalForTypeWithAnyImm(
+    std::initializer_list<std::pair<LLT, LLT>> Types) {
+    markAllIdxsAsCovered();
+    return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
+  }
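+  // Usage sketch (opcode choice illustrative): an opcode with a free-form
+  // immediate, in the spirit of G_SEXT_INREG, might be declared legal via
+  //   getActionDefinitionsBuilder(G_SEXT_INREG)
+  //       .legalForTypeWithAnyImm({s32, s64});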
+
   /// The instruction is legal when type indexes 0 and 1 along with the memory
   /// size and minimum alignment is any type and size tuple in the given list.
   LegalizeRuleSet &legalForTypesWithMemDesc(
@@ -497,16 +588,25 @@
 
   LegalizeRuleSet &alwaysLegal() {
     using namespace LegalizeMutations;
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Legal, always);
   }
 
+  /// The specified type index is coerced to the type given by \p Mutation if
+  /// \p Predicate is true.
+  LegalizeRuleSet &bitcastIf(LegalityPredicate Predicate,
+                             LegalizeMutation Mutation) {
+    // We have no choice but conservatively assume that a bitcast action with a
+    // free-form user-provided Predicate properly handles all type indices:
+    markAllIdxsAsCovered();
+    return actionIf(LegalizeAction::Bitcast, Predicate, Mutation);
+  }
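+  // Usage sketch (types illustrative): operate on an equivalently sized
+  // scalar when a small-element vector is not natively supported, e.g.
+  //   .bitcastIf(typeIs(0, LLT::vector(4, 8)),
+  //              changeTo(0, LLT::scalar(32)))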
+
   /// The instruction is lowered.
   LegalizeRuleSet &lower() {
     using namespace LegalizeMutations;
     // We have no choice but conservatively assume that predicate-less lowering
     // properly handles all type indices by design:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Lower, always);
   }
   /// The instruction is lowered if predicate is true. Keep type index 0 as the
@@ -515,7 +615,7 @@
     using namespace LegalizeMutations;
     // We have no choice but conservatively assume that lowering with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Lower, Predicate);
   }
   /// The instruction is lowered if predicate is true.
@@ -523,14 +623,13 @@
                            LegalizeMutation Mutation) {
     // We have no choice but conservatively assume that lowering with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Lower, Predicate, Mutation);
   }
   /// The instruction is lowered when type index 0 is any type in the given
   /// list. Keep type index 0 as the same type.
   LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
-    return actionFor(LegalizeAction::Lower, Types,
-                     LegalizeMutations::changeTo(0, 0));
+    return actionFor(LegalizeAction::Lower, Types);
   }
   /// The instruction is lowered when type index 0 is any type in the given
   /// list.
@@ -541,8 +640,7 @@
   /// The instruction is lowered when type indexes 0 and 1 is any type pair in
   /// the given list. Keep type index 0 as the same type.
   LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
-    return actionFor(LegalizeAction::Lower, Types,
-                     LegalizeMutations::changeTo(0, 0));
+    return actionFor(LegalizeAction::Lower, Types);
   }
   /// The instruction is lowered when type indexes 0 and 1 is any type pair in
   /// the given list.
@@ -567,11 +665,20 @@
                                      Types2);
   }
 
+  /// The instruction is emitted as a library call.
+  LegalizeRuleSet &libcall() {
+    using namespace LegalizeMutations;
+    // We have no choice but conservatively assume that the predicate-less
+    // libcall action properly handles all type indices by design:
+    markAllIdxsAsCovered();
+    return actionIf(LegalizeAction::Libcall, always);
+  }
+
   /// Like legalIf, but for the Libcall action.
   LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
     // We have no choice but conservatively assume that a libcall with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Libcall, Predicate);
   }
   LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
@@ -597,7 +704,7 @@
                                  LegalizeMutation Mutation) {
     // We have no choice but conservatively assume that an action with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
   }
   /// Narrow the scalar to the one selected by the mutation if the predicate is
@@ -606,9 +713,16 @@
                                   LegalizeMutation Mutation) {
     // We have no choice but conservatively assume that an action with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
   }
+  /// Narrow the scalar, as specified by the mutation, when type indexes 0 and
+  /// 1 are any type pair in the given list.
+  LegalizeRuleSet &
+  narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+                  LegalizeMutation Mutation) {
+    return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
+  }
 
   /// Add more elements to reach the type selected by the mutation if the
   /// predicate is true.
@@ -616,7 +730,7 @@
                                   LegalizeMutation Mutation) {
     // We have no choice but conservatively assume that an action with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
   }
   /// Remove elements to reach the type selected by the mutation if the
@@ -625,26 +739,36 @@
                                    LegalizeMutation Mutation) {
     // We have no choice but conservatively assume that an action with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
   }
 
   /// The instruction is unsupported.
   LegalizeRuleSet &unsupported() {
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Unsupported, always);
   }
   LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
     return actionIf(LegalizeAction::Unsupported, Predicate);
   }
+
+  LegalizeRuleSet &unsupportedFor(std::initializer_list<LLT> Types) {
+    return actionFor(LegalizeAction::Unsupported, Types);
+  }
+
   LegalizeRuleSet &unsupportedIfMemSizeNotPow2() {
     return actionIf(LegalizeAction::Unsupported,
                     LegalityPredicates::memSizeInBytesNotPow2(0));
   }
+  LegalizeRuleSet &lowerIfMemSizeNotPow2() {
+    return actionIf(LegalizeAction::Lower,
+                    LegalityPredicates::memSizeInBytesNotPow2(0));
+  }
 
   LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
     // We have no choice but conservatively assume that a custom action with a
     // free-form user provided Predicate properly handles all type indices:
-    markAllTypeIdxsAsCovered();
+    markAllIdxsAsCovered();
     return actionIf(LegalizeAction::Custom, Predicate);
   }
   LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
@@ -703,8 +827,15 @@
                     LegalizeMutations::scalarize(TypeIdx));
   }
 
+  LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
+    using namespace LegalityPredicates;
+    return actionIf(LegalizeAction::FewerElements,
+                    all(Predicate, isVector(typeIdx(TypeIdx))),
+                    LegalizeMutations::scalarize(TypeIdx));
+  }
+
   /// Ensure the scalar or element is at least as wide as Ty.
-  LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT &Ty) {
+  LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::WidenScalar,
@@ -714,7 +845,7 @@
 
   /// Ensure the scalar or element is at least as wide as Ty.
   LegalizeRuleSet &minScalarOrEltIf(LegalityPredicate Predicate,
-                                    unsigned TypeIdx, const LLT &Ty) {
+                                    unsigned TypeIdx, const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::WidenScalar,
@@ -724,16 +855,16 @@
   }
 
   /// Ensure the scalar is at least as wide as Ty.
-  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
+  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::WidenScalar,
-                    narrowerThan(TypeIdx, Ty.getSizeInBits()),
+                    scalarNarrowerThan(TypeIdx, Ty.getSizeInBits()),
                     changeTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Ensure the scalar is at most as wide as Ty.
-  LegalizeRuleSet &maxScalarOrElt(unsigned TypeIdx, const LLT &Ty) {
+  LegalizeRuleSet &maxScalarOrElt(unsigned TypeIdx, const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::NarrowScalar,
@@ -742,11 +873,11 @@
   }
 
   /// Ensure the scalar is at most as wide as Ty.
-  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT &Ty) {
+  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(LegalizeAction::NarrowScalar,
-                    widerThan(TypeIdx, Ty.getSizeInBits()),
+                    scalarWiderThan(TypeIdx, Ty.getSizeInBits()),
                     changeTo(typeIdx(TypeIdx), Ty));
   }
 
@@ -754,27 +885,30 @@
   /// For example, when the maximum size of one type depends on the size of
   /// another such as extracting N bits from an M bit container.
   LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
-                               const LLT &Ty) {
+                               const LLT Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
     return actionIf(
         LegalizeAction::NarrowScalar,
         [=](const LegalityQuery &Query) {
-          return widerThan(TypeIdx, Ty.getSizeInBits()) && Predicate(Query);
+          const LLT QueryTy = Query.Types[TypeIdx];
+          return QueryTy.isScalar() &&
+                 QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
+                 Predicate(Query);
         },
         changeElementTo(typeIdx(TypeIdx), Ty));
   }
 
   /// Limit the range of scalar sizes to MinTy and MaxTy.
-  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT &MinTy,
-                               const LLT &MaxTy) {
+  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT MinTy,
+                               const LLT MaxTy) {
     assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
     return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
   }
 
   /// Limit the range of scalar sizes to MinTy and MaxTy.
-  LegalizeRuleSet &clampScalarOrElt(unsigned TypeIdx, const LLT &MinTy,
-                                    const LLT &MaxTy) {
+  LegalizeRuleSet &clampScalarOrElt(unsigned TypeIdx, const LLT MinTy,
+                                    const LLT MaxTy) {
     return minScalarOrElt(TypeIdx, MinTy).maxScalarOrElt(TypeIdx, MaxTy);
   }
 
@@ -786,11 +920,25 @@
           return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
                  Query.Types[TypeIdx].getSizeInBits();
         },
+        LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
+  }
+
+  /// Narrow the scalar to match the size of another.
+  LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
+    typeIdx(TypeIdx);
+    return narrowScalarIf(
         [=](const LegalityQuery &Query) {
-          LLT T = Query.Types[LargeTypeIdx];
-          return std::make_pair(TypeIdx,
-                                T.isVector() ? T.getElementType() : T);
-        });
+          return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
+                 Query.Types[TypeIdx].getSizeInBits();
+        },
+        LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
+  }
+
+  /// Change the type \p TypeIdx to have the same scalar size as type \p
+  /// SameSizeIdx.
+  LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
+    return minScalarSameAs(TypeIdx, SameSizeIdx)
+          .maxScalarSameAs(TypeIdx, SameSizeIdx);
   }
 
   /// Conditionally widen the scalar or elt to match the size of another.
@@ -820,7 +968,7 @@
   }
 
   /// Limit the number of elements in EltTy vectors to at least MinElements.
-  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT &EltTy,
+  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT EltTy,
                                        unsigned MinElements) {
     // Mark the type index as covered:
     typeIdx(TypeIdx);
@@ -838,7 +986,7 @@
         });
   }
   /// Limit the number of elements in EltTy vectors to at most MaxElements.
-  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT &EltTy,
+  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT EltTy,
                                        unsigned MaxElements) {
     // Mark the type index as covered:
     typeIdx(TypeIdx);
@@ -861,12 +1009,12 @@
   /// No effect if the type is not a vector or does not have the same element
   /// type as the constraints.
   /// The element type of MinTy and MaxTy must match.
-  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT &MinTy,
-                                    const LLT &MaxTy) {
+  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT MinTy,
+                                    const LLT MaxTy) {
     assert(MinTy.getElementType() == MaxTy.getElementType() &&
            "Expected element types to agree");
 
-    const LLT &EltTy = MinTy.getElementType();
+    const LLT EltTy = MinTy.getElementType();
     return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
         .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
   }
@@ -882,6 +1030,10 @@
   /// LegalizeRuleSet in any way at all.
   /// \pre Type indices of the opcode form a dense [0, \p NumTypeIdxs) set.
   bool verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const;
+  /// Check if there is no imm index which is obviously not handled by the
+  /// LegalizeRuleSet in any way at all.
+  /// \pre Imm indices of the opcode form a dense [0, \p NumImmIdxs) set.
+  bool verifyImmIdxsCoverage(unsigned NumImmIdxs) const;
 
   /// Apply the ruleset to the given LegalityQuery.
   LegalizeActionStep apply(const LegalityQuery &Query) const;
@@ -1106,18 +1258,36 @@
   bool isLegal(const LegalityQuery &Query) const {
     return getAction(Query).Action == LegalizeAction::Legal;
   }
+
+  bool isLegalOrCustom(const LegalityQuery &Query) const {
+    auto Action = getAction(Query).Action;
+    return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
+  }
+
   bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
   bool isLegalOrCustom(const MachineInstr &MI,
                        const MachineRegisterInfo &MRI) const;
 
-  virtual bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
-                              MachineIRBuilder &MIRBuilder,
-                              GISelChangeObserver &Observer) const;
+  /// Called for instructions with the Custom LegalizationAction.
+  virtual bool legalizeCustom(LegalizerHelper &Helper,
+                              MachineInstr &MI) const {
+    llvm_unreachable("must implement this if custom action is used");
+  }
 
   /// Return true if MI is either legal or has been legalized and false
   /// if not legal.
-  virtual bool legalizeIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                 MachineIRBuilder &MIRBuilder) const;
+  virtual bool legalizeIntrinsic(LegalizerHelper &Helper,
+                                 MachineInstr &MI) const {
+    return true;
+  }
+
+  /// Return the opcode (SEXT/ZEXT/ANYEXT) that should be performed while
+  /// widening a constant of type SmallTy; targets can override this.
+  /// For example, the DAG does (SmallTy.isByteSized() ? G_SEXT : G_ZEXT),
+  /// which is the default here as well.
+  virtual unsigned getExtOpcodeForWideningConstant(LLT SmallTy) const;
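+  // A hypothetical target override (sketch only; MyTargetLegalizerInfo is an
+  // invented name):
+  //   unsigned MyTargetLegalizerInfo::getExtOpcodeForWideningConstant(
+  //       LLT SmallTy) const {
+  //     return TargetOpcode::G_ZEXT; // always zero-extend widened constants
+  //   }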
 
 private:
   /// Determine what action should be taken to legalize the given generic
@@ -1141,7 +1311,7 @@
   ///            {65, NarrowScalar} // bit sizes [65, +inf[
   ///           });
   /// It may be that only 64-bit pointers are supported on your target:
-  /// setPointerAction(G_GEP, 0, LLT:pointer(1),
+  /// setPointerAction(G_PTR_ADD, 0, LLT::pointer(1),
   ///           {{1, Unsupported},  // bit sizes [ 1, 63[
   ///            {64, Legal},       // bit sizes [64, 65[
   ///            {65, Unsupported}, // bit sizes [65, +inf[
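[Editor's note — illustrative sketch, not part of the patch. The legalizeCustom hook above now receives a LegalizerHelper, which carries the MachineIRBuilder, MachineRegisterInfo and change observer that used to be separate parameters. A minimal target override might look like this; MyTargetLegalizerInfo is hypothetical, and the G_FABS lowering shown (clear the sign bit with G_AND) mirrors LegalizerHelper's own approach:]

  bool MyTargetLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                             MachineInstr &MI) const {
    MachineIRBuilder &B = Helper.MIRBuilder;
    switch (MI.getOpcode()) {
    default:
      return false; // Unhandled opcode: report legalization failure.
    case TargetOpcode::G_FABS: {
      Register Dst = MI.getOperand(0).getReg();
      Register Src = MI.getOperand(1).getReg();
      LLT Ty = B.getMRI()->getType(Dst);
      // fabs(x) == x & ~sign-bit; LLT does not distinguish float from int.
      auto Mask =
          B.buildConstant(Ty, APInt::getSignedMaxValue(Ty.getSizeInBits()));
      B.buildAnd(Dst, Src, Mask);
      MI.eraseFromParent();
      return true;
    }
    }
  }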
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
index 06de580..57f6c03 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -42,15 +42,16 @@
   static char ID;
 
 private:
+  /// An input function to decide if the pass should run or not
+  /// on the given MachineFunction.
+  std::function<bool(const MachineFunction &)> DoNotRunPass;
+
   /// MRI contains all the register class/bank information that this
   /// pass uses and updates.
   MachineRegisterInfo *MRI;
   /// TTI used for getting remat costs for instructions.
   TargetTransformInfo *TTI;
 
-  /// Check whether or not \p MI needs to be moved close to its uses.
-  bool shouldLocalize(const MachineInstr &MI);
-
   /// Check if \p MOUse is used in the same basic block as \p Def.
   /// If the use is in the same block, we say it is local.
   /// When the use is not local, \p InsertMBB will contain the basic
@@ -72,14 +73,13 @@
 
 public:
   Localizer();
+  Localizer(std::function<bool(const MachineFunction &)>);
 
   StringRef getPassName() const override { return "Localizer"; }
 
   MachineFunctionProperties getRequiredProperties() const override {
     return MachineFunctionProperties()
-        .set(MachineFunctionProperties::Property::IsSSA)
-        .set(MachineFunctionProperties::Property::Legalized)
-        .set(MachineFunctionProperties::Property::RegBankSelected);
+        .set(MachineFunctionProperties::Property::IsSSA);
   }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override;
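[Editor's note — illustrative sketch, not part of the patch. The new Localizer constructor takes a per-function veto: DoNotRunPass returning true skips the MachineFunction. A sketch under that assumption (the factory name is hypothetical):]

  FunctionPass *createMyLocalizer() {
    return new Localizer([](const MachineFunction &MF) {
      // True means "do not run the pass on this function".
      return MF.getFunction().hasOptNone();
    });
  }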
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h
new file mode 100644
index 0000000..cd2a871
--- /dev/null
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LostDebugLocObserver.h
@@ -0,0 +1,50 @@
+//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.h -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Tracks DebugLocs between checkpoints and verifies that they are transferred.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
+#define LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+
+namespace llvm {
+class LostDebugLocObserver : public GISelChangeObserver {
+  StringRef DebugType;
+  SmallSet<DebugLoc, 4> LostDebugLocs;
+  SmallPtrSet<MachineInstr *, 4> PotentialMIsForDebugLocs;
+  unsigned NumLostDebugLocs = 0;
+
+public:
+  LostDebugLocObserver(StringRef DebugType) : DebugType(DebugType) {}
+
+  unsigned getNumLostDebugLocs() const { return NumLostDebugLocs; }
+
+  /// Call this to indicate that it's a good point to assess whether locations
+  /// have been lost. Typically this will be after a logical change has been
+  /// completed, such as when the caller has finished replacing some
+  /// instructions with alternatives. When CheckDebugLocs is true, the
+  /// locations will be checked for losses since the last checkpoint. When
+  /// CheckDebugLocs is false, it just resets, ready for the next checkpoint
+  /// without checking anything. This can be helpful to limit the detection to
+  /// easy-to-fix portions of an algorithm before allowing more difficult ones.
+  void checkpoint(bool CheckDebugLocs = true);
+
+  void createdInstr(MachineInstr &MI) override;
+  void erasingInstr(MachineInstr &MI) override;
+  void changingInstr(MachineInstr &MI) override;
+  void changedInstr(MachineInstr &MI) override;
+
+private:
+  void analyzeDebugLocations();
+};
+
+} // namespace llvm
+#endif // ifndef LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
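[Editor's note — illustrative sketch, not part of the patch. Intended usage, assuming a surrounding GISel pass with a MachineIRBuilder named MIRBuilder: install the observer, perform a logical unit of rewriting, then checkpoint() to verify the DebugLocs of erased instructions reached their replacements.]

  LostDebugLocObserver LocObserver("mypass");
  MIRBuilder.setChangeObserver(LocObserver);
  // ... erase/replace instructions through the builder ...
  LocObserver.checkpoint(); // Diagnose any locations lost since the last one.
  unsigned NumLost = LocObserver.getNumLostDebugLocs();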
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 13eddd9..55d6d36 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -12,16 +12,15 @@
 #ifndef LLVM_GMIR_PATTERNMATCH_H
 #define LLVM_GMIR_PATTERNMATCH_H
 
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/APInt.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/InstrTypes.h"
 
 namespace llvm {
 namespace MIPatternMatch {
 
 template <typename Reg, typename Pattern>
-bool mi_match(Reg R, MachineRegisterInfo &MRI, Pattern &&P) {
+bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P) {
   return P.match(MRI, R);
 }
 
@@ -30,7 +29,7 @@
   SubPatternT SubPat;
   OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
 
-  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
     return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
   }
 };
@@ -40,11 +39,25 @@
   return SP;
 }
 
+template <typename SubPatternT> struct OneNonDBGUse_match {
+  SubPatternT SubPat;
+  OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
+    return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
+  }
+};
+
+template <typename SubPat>
+inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
+  return SP;
+}
+
 struct ConstantMatch {
   int64_t &CR;
   ConstantMatch(int64_t &C) : CR(C) {}
-  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
-    if (auto MaybeCst = getConstantVRegVal(Reg, MRI)) {
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
+    if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
       CR = *MaybeCst;
       return true;
     }
@@ -54,13 +67,36 @@
 
 inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
 
+/// Matcher for a specific constant value.
+struct SpecificConstantMatch {
+  int64_t RequestedVal;
+  SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
+    int64_t MatchedVal;
+    return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
+  }
+};
+
+/// Matches a constant equal to \p RequestedValue.
+inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
+  return SpecificConstantMatch(RequestedValue);
+}
+
+///{
+/// Convenience matchers for specific integer values.
+inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
+inline SpecificConstantMatch m_AllOnesInt() {
+  return SpecificConstantMatch(-1);
+}
+///}
+
 // TODO: Rework this for different kinds of MachineOperand.
 // Currently assumes the Src for a match is a register.
 // We might want to support taking in some MachineOperands and call getReg on
 // that.
 
 struct operand_type_match {
-  bool match(const MachineRegisterInfo &MRI, unsigned Reg) { return true; }
+  bool match(const MachineRegisterInfo &MRI, Register Reg) { return true; }
   bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
     return MO->isReg();
   }
@@ -71,7 +107,7 @@
 /// Matching combinators.
 template <typename... Preds> struct And {
   template <typename MatchSrc>
-  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
     return true;
   }
 };
@@ -83,14 +119,14 @@
       : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
   }
   template <typename MatchSrc>
-  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
     return P.match(MRI, src) && And<Preds...>::match(MRI, src);
   }
 };
 
 template <typename... Preds> struct Or {
   template <typename MatchSrc>
-  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
     return false;
   }
 };
@@ -101,7 +137,7 @@
   Or(Pred &&p, Preds &&... preds)
       : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
   template <typename MatchSrc>
-  bool match(MachineRegisterInfo &MRI, MatchSrc &&src) {
+  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
     return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
   }
 };
@@ -123,7 +159,7 @@
 
 template <> struct bind_helper<MachineInstr *> {
   static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
-                   unsigned Reg) {
+                   Register Reg) {
     MI = MRI.getVRegDef(Reg);
     if (MI)
       return true;
@@ -132,7 +168,7 @@
 };
 
 template <> struct bind_helper<LLT> {
-  static bool bind(const MachineRegisterInfo &MRI, LLT &Ty, unsigned Reg) {
+  static bool bind(const MachineRegisterInfo &MRI, LLT Ty, Register Reg) {
     Ty = MRI.getType(Reg);
     if (Ty.isValid())
       return true;
@@ -142,7 +178,7 @@
 
 template <> struct bind_helper<const ConstantFP *> {
   static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
-                   unsigned Reg) {
+                   Register Reg) {
     F = getConstantFPVRegVal(Reg, MRI);
     if (F)
       return true;
@@ -162,7 +198,9 @@
 
 inline bind_ty<Register> m_Reg(Register &R) { return R; }
 inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
-inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
+inline bind_ty<LLT> m_Type(LLT Ty) { return Ty; }
+inline bind_ty<CmpInst::Predicate> m_Pred(CmpInst::Predicate &P) { return P; }
+inline operand_type_match m_Pred() { return operand_type_match(); }
 
 // Helper for matching G_FCONSTANT
 inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }
@@ -175,7 +213,8 @@
   RHS_P R;
 
   BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
-  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
     MachineInstr *TmpMI;
     if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
       if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
@@ -196,6 +235,12 @@
 }
 
 template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
+m_GPtrAdd(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
 inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
                                                             const RHS &R) {
   return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
@@ -232,17 +277,42 @@
 }
 
 template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
+m_GXor(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
 inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
                                                                 const RHS &R) {
   return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
 }
 
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>
+m_GShl(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>
+m_GLShr(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
+m_GAShr(const LHS &L, const RHS &R) {
+  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
+}
+
 // Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc
 template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
   SrcTy L;
 
   UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
-  template <typename OpTy> bool match(MachineRegisterInfo &MRI, OpTy &&Op) {
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
     MachineInstr *TmpMI;
     if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
       if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
@@ -318,18 +388,102 @@
   return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
 }
 
+// General helper for generic MI compares, i.e. G_ICMP and G_FCMP
+// TODO: Allow checking a specific predicate.
+template <typename Pred_P, typename LHS_P, typename RHS_P, unsigned Opcode>
+struct CompareOp_match {
+  Pred_P P;
+  LHS_P L;
+  RHS_P R;
+
+  CompareOp_match(const Pred_P &Pred, const LHS_P &LHS, const RHS_P &RHS)
+      : P(Pred), L(LHS), R(RHS) {}
+
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
+      return false;
+
+    auto TmpPred =
+        static_cast<CmpInst::Predicate>(TmpMI->getOperand(1).getPredicate());
+    if (!P.match(MRI, TmpPred))
+      return false;
+
+    return L.match(MRI, TmpMI->getOperand(2).getReg()) &&
+           R.match(MRI, TmpMI->getOperand(3).getReg());
+  }
+};
+
+template <typename Pred, typename LHS, typename RHS>
+inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>
+m_GICmp(const Pred &P, const LHS &L, const RHS &R) {
+  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>(P, L, R);
+}
+
+template <typename Pred, typename LHS, typename RHS>
+inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>
+m_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
+  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>(P, L, R);
+}
+
 // Helper for checking if a Reg is of specific type.
 struct CheckType {
   LLT Ty;
-  CheckType(const LLT &Ty) : Ty(Ty) {}
+  CheckType(const LLT Ty) : Ty(Ty) {}
 
-  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
     return MRI.getType(Reg) == Ty;
   }
 };
 
 inline CheckType m_SpecificType(LLT Ty) { return Ty; }
 
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
+struct TernaryOp_match {
+  Src0Ty Src0;
+  Src1Ty Src1;
+  Src2Ty Src2;
+
+  TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
+      : Src0(Src0), Src1(Src1), Src2(Src2) {}
+  template <typename OpTy>
+  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+    MachineInstr *TmpMI;
+    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
+        return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
+                Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
+                Src2.match(MRI, TmpMI->getOperand(3).getReg()));
+      }
+    }
+    return false;
+  }
+};
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
+inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+                       TargetOpcode::G_INSERT_VECTOR_ELT>
+m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
+  return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+                         TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
+}
+
+/// Matches a register negated by a G_SUB.
+/// G_SUB 0, %negated_reg
+template <typename SrcTy>
+inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
+m_Neg(const SrcTy &&Src) {
+  return m_GSub(m_ZeroInt(), Src);
+}
+
+/// Matches a register complemented (bitwise NOT) by a G_XOR.
+/// G_XOR %not_reg, -1
+template <typename SrcTy>
+inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
+m_Not(const SrcTy &&Src) {
+  return m_GXor(Src, m_AllOnesInt());
+}
+
 } // namespace MIPatternMatch
 } // namespace llvm
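[Editor's note — illustrative sketch, not part of the patch. The matchers added above compose like the existing ones; Reg and MRI are assumed from surrounding code. m_Pred binds the compare predicate, m_ZeroInt/m_AllOnesInt match specific constants, and m_OneNonDBGUse guards the non-debug use count:]

  CmpInst::Predicate Pred;
  Register LHS;
  // Match a single-(non-debug-)use G_ICMP <pred> %lhs, 0.
  bool IsCmpWithZero =
      mi_match(Reg, MRI,
               m_OneNonDBGUse(m_GICmp(m_Pred(Pred), m_Reg(LHS), m_ZeroInt())));
  Register X;
  bool IsNot = mi_match(Reg, MRI, m_Not(m_Reg(X))); // G_XOR %x, -1
  bool IsNeg = mi_match(Reg, MRI, m_Neg(m_Reg(X))); // G_SUB 0, %x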
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index da3c478..1ab4cd7 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -14,15 +14,14 @@
 #define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
 
 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
-#include "llvm/CodeGen/GlobalISel/Types.h"
-
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugLoc.h"
-
+#include "llvm/IR/Module.h"
 
 namespace llvm {
 
@@ -37,23 +36,23 @@
 /// to transfer BuilderState between different kinds of MachineIRBuilders.
 struct MachineIRBuilderState {
   /// MachineFunction under construction.
-  MachineFunction *MF;
+  MachineFunction *MF = nullptr;
   /// Information used to access the description of the opcodes.
-  const TargetInstrInfo *TII;
+  const TargetInstrInfo *TII = nullptr;
   /// Information used to verify types are consistent and to create virtual registers.
-  MachineRegisterInfo *MRI;
+  MachineRegisterInfo *MRI = nullptr;
   /// Debug location to be set to any instruction we create.
   DebugLoc DL;
 
   /// \name Fields describing the insertion point.
   /// @{
-  MachineBasicBlock *MBB;
+  MachineBasicBlock *MBB = nullptr;
   MachineBasicBlock::iterator II;
   /// @}
 
-  GISelChangeObserver *Observer;
+  GISelChangeObserver *Observer = nullptr;
 
-  GISelCSEInfo *CSEInfo;
+  GISelCSEInfo *CSEInfo = nullptr;
 };
 
 class DstOp {
@@ -68,7 +67,7 @@
   DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
   DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
   DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
-  DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
+  DstOp(const LLT T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
   DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
 
   void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const {
@@ -122,14 +121,22 @@
     MachineInstrBuilder SrcMIB;
     Register Reg;
     CmpInst::Predicate Pred;
+    int64_t Imm;
   };
 
 public:
-  enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
+  enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate, Ty_Imm };
   SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
   SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
+  /// Use of registers held in unsigned integer variables (or more rarely signed
+  /// integers) is no longer permitted to avoid ambiguity with upcoming support
+  /// for immediates.
+  SrcOp(unsigned) = delete;
+  SrcOp(int) = delete;
+  SrcOp(uint64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}
+  SrcOp(int64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}
 
   void addSrcToMIB(MachineInstrBuilder &MIB) const {
     switch (Ty) {
@@ -142,12 +149,16 @@
     case SrcType::Ty_MIB:
       MIB.addUse(SrcMIB->getOperand(0).getReg());
       break;
+    case SrcType::Ty_Imm:
+      MIB.addImm(Imm);
+      break;
     }
   }
 
   LLT getLLTTy(const MachineRegisterInfo &MRI) const {
     switch (Ty) {
     case SrcType::Ty_Predicate:
+    case SrcType::Ty_Imm:
       llvm_unreachable("Not a register operand");
     case SrcType::Ty_Reg:
       return MRI.getType(Reg);
@@ -160,6 +171,7 @@
   Register getReg() const {
     switch (Ty) {
     case SrcType::Ty_Predicate:
+    case SrcType::Ty_Imm:
       llvm_unreachable("Not a register operand");
     case SrcType::Ty_Reg:
       return Reg;
@@ -178,6 +190,15 @@
     }
   }
 
+  int64_t getImm() const {
+    switch (Ty) {
+    case SrcType::Ty_Imm:
+      return Imm;
+    default:
+      llvm_unreachable("Not an immediate");
+    }
+  }
+
   SrcType getSrcOpKind() const { return Ty; }
 
 private:
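[Editor's note — illustrative sketch, not part of the patch. With the new Ty_Imm kind, immediates flow straight into buildInstr, and the deleted int/unsigned constructors make ambiguous register-vs-immediate calls fail to compile. The builder B and register Src are assumed:]

  Register Dst = B.getMRI()->createGenericVirtualRegister(LLT::scalar(32));
  // The int64_t is stored as Ty_Imm and emitted through MIB.addImm(8);
  // passing a plain `8` (an int) would hit the deleted overload.
  B.buildInstr(TargetOpcode::G_SEXT_INREG, {Dst}, {Src, int64_t(8)});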
@@ -201,21 +222,39 @@
   MachineIRBuilderState State;
 
 protected:
-  void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend);
+  void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend);
 
-  void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
-  void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
+  void validateUnaryOp(const LLT Res, const LLT Op0);
+  void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1);
+  void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1);
 
-  void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty,
-                        const LLT &Op1Ty);
-  void recordInsertion(MachineInstr *MI) const;
+  void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty,
+                        const LLT Op1Ty);
+
+  void recordInsertion(MachineInstr *InsertedInstr) const {
+    if (State.Observer)
+      State.Observer->createdInstr(*InsertedInstr);
+  }
 
 public:
   /// Some constructors for easy use.
   MachineIRBuilder() = default;
   MachineIRBuilder(MachineFunction &MF) { setMF(MF); }
-  MachineIRBuilder(MachineInstr &MI) : MachineIRBuilder(*MI.getMF()) {
+
+  MachineIRBuilder(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt) {
+    setMF(*MBB.getParent());
+    setInsertPt(MBB, InsPt);
+  }
+
+  MachineIRBuilder(MachineInstr &MI) :
+    MachineIRBuilder(*MI.getParent(), MI.getIterator()) {
     setInstr(MI);
+    setDebugLoc(MI.getDebugLoc());
+  }
+
+  MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
+    MachineIRBuilder(MI) {
+    setChangeObserver(Observer);
   }
 
   virtual ~MachineIRBuilder() = default;
@@ -272,10 +311,16 @@
   /// Set the insertion point before the specified position.
   /// \pre MBB must be in getMF().
   /// \pre II must be a valid iterator in MBB.
-  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
+  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II) {
+    assert(MBB.getParent() == &getMF() &&
+           "Basic block is in a different function");
+    State.MBB = &MBB;
+    State.II = II;
+  }
+
   /// @}
 
-  void setCSEInfo(GISelCSEInfo *Info);
+  void setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
 
   /// \name Setters for the insertion point.
   /// @{
@@ -284,15 +329,34 @@
 
   /// Set the insertion point to the  end of \p MBB.
   /// \pre \p MBB must be contained by getMF().
-  void setMBB(MachineBasicBlock &MBB);
+  void setMBB(MachineBasicBlock &MBB) {
+    State.MBB = &MBB;
+    State.II = MBB.end();
+    assert(&getMF() == MBB.getParent() &&
+           "Basic block is in a different function");
+  }
 
   /// Set the insertion point to before MI.
   /// \pre MI must be in getMF().
-  void setInstr(MachineInstr &MI);
+  void setInstr(MachineInstr &MI) {
+    assert(MI.getParent() && "Instruction is not part of a basic block");
+    setMBB(*MI.getParent());
+    State.II = MI.getIterator();
+  }
   /// @}
 
-  void setChangeObserver(GISelChangeObserver &Observer);
-  void stopObservingChanges();
+  /// Set the insertion point to before MI, and set the debug loc to MI's loc.
+  /// \pre MI must be in getMF().
+  void setInstrAndDebugLoc(MachineInstr &MI) {
+    setInstr(MI);
+    setDebugLoc(MI.getDebugLoc());
+  }
+
+  void setChangeObserver(GISelChangeObserver &Observer) {
+    State.Observer = &Observer;
+  }
+
+  void stopObservingChanges() { State.Observer = nullptr; }
   /// @}
 
   /// Set the debug location to \p DL for all the next build instructions.
@@ -308,7 +372,9 @@
   /// \pre setBasicBlock or setMI must have been called.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildInstr(unsigned Opcode);
+  MachineInstrBuilder buildInstr(unsigned Opcode) {
+    return insertInstr(buildInstrNoInsert(Opcode));
+  }
 
   /// Build but don't insert <empty> = \p Opcode <empty>.
   ///
@@ -348,6 +414,17 @@
   /// given. Convert "llvm.dbg.label Label" to "DBG_LABEL Label".
   MachineInstrBuilder buildDbgLabel(const MDNode *Label);
 
+  /// Build and insert \p Res = G_DYN_STACKALLOC \p Size, \p Align
+  ///
+  /// G_DYN_STACKALLOC does a dynamic stack allocation and writes the address of
+  /// the allocated memory into \p Res.
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size,
+                                         Align Alignment);
+
   /// Build and insert \p Res = G_FRAME_INDEX \p Idx
   ///
   /// G_FRAME_INDEX materializes the address of an alloca value or other
@@ -371,10 +448,11 @@
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV);
 
-  /// Build and insert \p Res = G_GEP \p Op0, \p Op1
+  /// Build and insert \p Res = G_PTR_ADD \p Op0, \p Op1
   ///
-  /// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
-  /// storing the resulting pointer in \p Res.
+  /// G_PTR_ADD adds \p Op1 addressable units to the pointer specified by \p Op0,
+  /// storing the resulting pointer in \p Res. Addressable units are typically
+  /// bytes but this can vary between targets.
   ///
   /// \pre setBasicBlock or setMI must have been called.
   /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
@@ -382,32 +460,38 @@
   /// \pre \p Op1 must be a generic virtual register with scalar type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0,
-                               const SrcOp &Op1);
+  MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
+                                  const SrcOp &Op1);
 
-  /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
+  /// Materialize and insert \p Res = G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
   ///
-  /// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
+  /// G_PTR_ADD adds \p Value bytes to the pointer specified by \p Op0,
   /// storing the resulting pointer in \p Res. If \p Value is zero then no
-  /// G_GEP or G_CONSTANT will be created and \pre Op0 will be assigned to
+  /// G_PTR_ADD or G_CONSTANT will be created and \p Op0 will be assigned to
   /// \p Res.
   ///
   /// \pre setBasicBlock or setMI must have been called.
   /// \pre \p Op0 must be a generic virtual register with pointer type.
   /// \pre \p ValueTy must be a scalar type.
   /// \pre \p Res must be 0. This is to detect confusion between
-  ///      materializeGEP() and buildGEP().
+  ///      materializePtrAdd() and buildPtrAdd().
   /// \post \p Res will either be a new generic virtual register of the same
   ///       type as \p Op0 or \p Op0 itself.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  Optional<MachineInstrBuilder> materializeGEP(Register &Res, Register Op0,
-                                               const LLT &ValueTy,
-                                               uint64_t Value);
+  Optional<MachineInstrBuilder> materializePtrAdd(Register &Res, Register Op0,
+                                                  const LLT ValueTy,
+                                                  uint64_t Value);
 
-  /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
+  /// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
+  MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
+                                   const SrcOp &Op1) {
+    return buildInstr(TargetOpcode::G_PTRMASK, {Res}, {Op0, Op1});
+  }
+
+  /// Build and insert \p Res = G_PTRMASK \p Op0, G_CONSTANT (1 << NumBits) - 1
   ///
-  /// G_PTR_MASK clears the low bits of a pointer operand without destroying its
+  /// This clears the low bits of a pointer operand without destroying its
   /// pointer properties. This has the effect of rounding the address *down* to
   /// a specified alignment in bits.
   ///
@@ -418,8 +502,8 @@
   ///      be cleared in \p Op0.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
-                                   uint32_t NumBits);
+  MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0,
+                                          uint32_t NumBits);
 
   /// Build and insert \p Res, \p CarryOut = G_UADDO \p Op0, \p Op1
   ///
@@ -434,7 +518,27 @@
   ///
   /// \return The newly created instruction.
   MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut,
-                                 const SrcOp &Op0, const SrcOp &Op1);
+                                 const SrcOp &Op0, const SrcOp &Op1) {
+    return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_USUBO \p Op0, \p Op1
+  MachineInstrBuilder buildUSubo(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1) {
+    return buildInstr(TargetOpcode::G_USUBO, {Res, CarryOut}, {Op0, Op1});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_SADDO \p Op0, \p Op1
+  MachineInstrBuilder buildSAddo(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1) {
+    return buildInstr(TargetOpcode::G_SADDO, {Res, CarryOut}, {Op0, Op1});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_SSUBO \p Op0, \p Op1
+  MachineInstrBuilder buildSSubo(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1) {
+    return buildInstr(TargetOpcode::G_SSUBO, {Res, CarryOut}, {Op0, Op1});
+  }
 
   /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
   /// \p Op1, \p CarryIn
@@ -452,7 +556,34 @@
   /// \return The newly created instruction.
   MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut,
                                  const SrcOp &Op0, const SrcOp &Op1,
-                                 const SrcOp &CarryIn);
+                                 const SrcOp &CarryIn) {
+    return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
+                                             {Op0, Op1, CarryIn});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_USUBE \p Op0, \p Op1, \p CarryIn
+  MachineInstrBuilder buildUSube(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1,
+                                 const SrcOp &CarryIn) {
+    return buildInstr(TargetOpcode::G_USUBE, {Res, CarryOut},
+                                             {Op0, Op1, CarryIn});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_SADDE \p Op0, \p Op1, \p CarryIn
+  MachineInstrBuilder buildSAdde(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1,
+                                 const SrcOp &CarryIn) {
+    return buildInstr(TargetOpcode::G_SADDE, {Res, CarryOut},
+                                             {Op0, Op1, CarryIn});
+  }
+
+  /// Build and insert \p Res, \p CarryOut = G_SSUBE \p Op0, \p Op1, \p CarryIn
+  MachineInstrBuilder buildSSube(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1,
+                                 const SrcOp &CarryIn) {
+    return buildInstr(TargetOpcode::G_SSUBE, {Res, CarryOut},
+                                             {Op0, Op1, CarryIn});
+  }
 
   /// Build and insert \p Res = G_ANYEXT \p Op0
   ///
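[Editor's note — illustrative sketch, not part of the patch. The overflow and carry builders above chain naturally; here a 128-bit add is assembled from two 64-bit legs (builder B and the leg registers are assumed):]

  LLT S64 = LLT::scalar(64);
  LLT S1 = LLT::scalar(1);
  auto Lo = B.buildUAddo(S64, S1, LHSLo, RHSLo); // Defines {sum, carry-out}.
  auto Hi = B.buildUAdde(S64, S1, LHSHi, RHSHi,
                         Lo.getReg(1));          // Consumes the carry.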
@@ -484,16 +615,38 @@
   /// \return The newly created instruction.
   MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);
 
+  /// Build and insert \p Res = G_SEXT_INREG \p Op, ImmOp
+  MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp) {
+    return buildInstr(TargetOpcode::G_SEXT_INREG, {Res}, {Op, SrcOp(ImmOp)});
+  }
+
+  /// Build and insert \p Res = G_FPEXT \p Op
+  MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
+                                 Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FPEXT, {Res}, {Op}, Flags);
+  }
+
+
   /// Build and insert a G_PTRTOINT instruction.
   MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src) {
     return buildInstr(TargetOpcode::G_PTRTOINT, {Dst}, {Src});
   }
 
+  /// Build and insert a G_INTTOPTR instruction.
+  MachineInstrBuilder buildIntToPtr(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_INTTOPTR, {Dst}, {Src});
+  }
+
   /// Build and insert \p Dst = G_BITCAST \p Src
   MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src) {
     return buildInstr(TargetOpcode::G_BITCAST, {Dst}, {Src});
   }
 
+  /// Build and insert \p Dst = G_ADDRSPACE_CAST \p Src
+  MachineInstrBuilder buildAddrSpaceCast(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_ADDRSPACE_CAST, {Dst}, {Src});
+  }
+
   /// \return The opcode of the extension the target wants to use for boolean
   /// values.
   unsigned getBoolExtOp(bool IsVec, bool IsFP) const;
@@ -583,7 +736,7 @@
   ///      depend on bit 0 (for now).
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildBrCond(Register Tst, MachineBasicBlock &Dest);
+  MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
 
   /// Build and insert G_BRINDIRECT \p Tgt
   ///
@@ -667,7 +820,17 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
-                                MachineMemOperand &MMO);
+                                MachineMemOperand &MMO) {
+    return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+  }
+
+  /// Build and insert a G_LOAD instruction, while constructing the
+  /// MachineMemOperand.
+  MachineInstrBuilder
+  buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+            Align Alignment,
+            MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+            const AAMDNodes &AAInfo = AAMDNodes());
 
   /// Build and insert `Res = <opcode> Addr, MMO`.
   ///
@@ -681,6 +844,14 @@
   MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res,
                                      const SrcOp &Addr, MachineMemOperand &MMO);
 
+  /// Helper to create a load from a constant offset given a base address. Load
+  /// the type of \p Dst at \p Offset from the given base address and memory
+  /// operand.
+  MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst,
+                                          const SrcOp &BasePtr,
+                                          MachineMemOperand &BaseMMO,
+                                          int64_t Offset);
+
   /// Build and insert `G_STORE Val, Addr, MMO`.
   ///
   /// Stores the value \p Val to \p Addr.
@@ -693,6 +864,14 @@
   MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
                                  MachineMemOperand &MMO);
 
+  /// Build and insert a G_STORE instruction, while constructing the
+  /// MachineMemOperand.
+  MachineInstrBuilder
+  buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+             Align Alignment,
+             MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+             const AAMDNodes &AAInfo = AAMDNodes());
+
   /// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
   ///
   /// \pre setBasicBlock or setMI must have been called.
@@ -732,6 +911,8 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
+  MachineInstrBuilder buildMerge(const DstOp &Res,
+                                 std::initializer_list<SrcOp> Ops);
 
   /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
   ///
@@ -782,6 +963,23 @@
   MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
                                             ArrayRef<Register> Ops);
 
+  /// Build and insert a vector splat of a scalar \p Src using a
+  /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Src must have the same type as the element type of \p Dst
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
+
+  /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
+                                         const SrcOp &Src2, ArrayRef<int> Mask);
+
   /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
   ///
   /// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
@@ -796,8 +994,8 @@
   MachineInstrBuilder buildConcatVectors(const DstOp &Res,
                                          ArrayRef<Register> Ops);
 
-  MachineInstrBuilder buildInsert(Register Res, Register Src,
-                                  Register Op, unsigned Index);
+  MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src,
+                                  const SrcOp &Op, unsigned Index);
 
   /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
   /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
@@ -824,7 +1022,8 @@
   /// \pre \p Res must be smaller than \p Op
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op);
+  MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op,
+                                   Optional<unsigned> Flags = None);
 
   /// Build and insert \p Res = G_TRUNC \p Op
   ///
@@ -867,7 +1066,8 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
-                                const SrcOp &Op0, const SrcOp &Op1);
+                                const SrcOp &Op0, const SrcOp &Op1,
+                                Optional<unsigned> Flags = None);
 
   /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
   ///
@@ -880,7 +1080,8 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
-                                  const SrcOp &Op0, const SrcOp &Op1);
+                                  const SrcOp &Op0, const SrcOp &Op1,
+                                  Optional<unsigned> Flags = None);
 
   /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
   /// \p Elt, \p Idx
@@ -961,8 +1162,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, Register OldValRes,
-                                     Register Addr, Register Val,
+  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
+                                     const SrcOp &Addr, const SrcOp &Val,
                                      MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
@@ -1135,9 +1336,24 @@
   MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                          Register Val, MachineMemOperand &MMO);
 
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO`.
+  MachineInstrBuilder buildAtomicRMWFAdd(
+    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+    MachineMemOperand &MMO);
+
+  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO`.
+  MachineInstrBuilder buildAtomicRMWFSub(
+        const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+        MachineMemOperand &MMO);
+
   /// Build and insert `G_FENCE Ordering, Scope`.
   MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);
 
+  /// Build and insert \p Dst = G_FREEZE \p Src
+  MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_FREEZE, {Dst}, {Src});
+  }
+
   /// Build and insert \p Res = G_BLOCK_ADDR \p BA
   ///
   /// G_BLOCK_ADDR computes the address of a basic block.
@@ -1210,6 +1426,36 @@
     return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
   }
 
+  MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMUL, {Dst}, {Src0, Src1}, Flags);
+  }
+
+  MachineInstrBuilder buildFMinNum(const DstOp &Dst, const SrcOp &Src0,
+                                   const SrcOp &Src1,
+                                   Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMINNUM, {Dst}, {Src0, Src1}, Flags);
+  }
+
+  MachineInstrBuilder buildFMaxNum(const DstOp &Dst, const SrcOp &Src0,
+                                   const SrcOp &Src1,
+                                   Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMAXNUM, {Dst}, {Src0, Src1}, Flags);
+  }
+
+  MachineInstrBuilder buildFMinNumIEEE(const DstOp &Dst, const SrcOp &Src0,
+                                       const SrcOp &Src1,
+                                       Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMINNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
+  }
+
+  MachineInstrBuilder buildFMaxNumIEEE(const DstOp &Dst, const SrcOp &Src0,
+                                       const SrcOp &Src1,
+                                       Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMAXNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
+  }
+
   MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                Optional<unsigned> Flags = None) {
@@ -1298,32 +1544,99 @@
     return buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, {Dst}, {Src0});
   }
 
+  /// Build and insert \p Dst = G_BSWAP \p Src0
+  MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_BSWAP, {Dst}, {Src0});
+  }
+
   /// Build and insert \p Res = G_FADD \p Op0, \p Op1
   MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
-                                const SrcOp &Src1) {
-    return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1});
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
   MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
-                                const SrcOp &Src1) {
-    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1});
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
+  }
+
+  /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
+  MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
   MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
-                               const SrcOp &Src1, const SrcOp &Src2) {
-    return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2});
+                               const SrcOp &Src1, const SrcOp &Src2,
+                               Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2}, Flags);
+  }
+
+  /// Build and insert \p Res = G_FMAD \p Op0, \p Op1, \p Op2
+  MachineInstrBuilder buildFMAD(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1, const SrcOp &Src2,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FMAD, {Dst}, {Src0, Src1, Src2}, Flags);
   }
 
   /// Build and insert \p Res = G_FNEG \p Op0
-  MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0) {
-    return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0});
+  MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0}, Flags);
   }
 
   /// Build and insert \p Res = G_FABS \p Op0
-  MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0) {
-    return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0});
+  MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_FCANONICALIZE \p Src0
+  MachineInstrBuilder buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0,
+                                         Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FCANONICALIZE, {Dst}, {Src0}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_INTRINSIC_TRUNC \p Src0
+  MachineInstrBuilder buildIntrinsicTrunc(const DstOp &Dst, const SrcOp &Src0,
+                                         Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Dst}, {Src0}, Flags);
+  }
+
+  /// Build and insert \p Res = G_FFLOOR \p Op0
+  MachineInstrBuilder buildFFloor(const DstOp &Dst, const SrcOp &Src0,
+                                          Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FFLOOR, {Dst}, {Src0}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_FLOG \p Src
+  MachineInstrBuilder buildFLog(const DstOp &Dst, const SrcOp &Src,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FLOG, {Dst}, {Src}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_FLOG2 \p Src
+  MachineInstrBuilder buildFLog2(const DstOp &Dst, const SrcOp &Src,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FLOG2, {Dst}, {Src}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_FEXP2 \p Src
+  MachineInstrBuilder buildFExp2(const DstOp &Dst, const SrcOp &Src,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
+  }
+
+  /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
+  MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1,
+                                Optional<unsigned> Flags = None) {
+    return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
   }
 
   /// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
@@ -1376,6 +1689,11 @@
     return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
   }
 
+  /// Build and insert \p Dst = G_ABS \p Src
+  MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
+  }
+
   /// Build and insert \p Res = G_JUMP_TABLE \p JTI
   ///
   /// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
@@ -1384,6 +1702,101 @@
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);
 
+  /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
+  ///
+  /// \p ScalarIn is the scalar accumulator input to start the sequential
+  /// reduction operation of \p VecIn.
+  MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
+                                            const SrcOp &ScalarIn,
+                                            const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
+                      {ScalarIn, {VecIn}});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
+  ///
+  /// \p ScalarIn is the scalar accumulator input to start the sequential
+  /// reduction operation of \p VecIn.
+  MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
+                                            const SrcOp &ScalarIn,
+                                            const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
+                      {ScalarIn, {VecIn}});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_FADD \p ScalarIn, \p VecIn
+  ///
+  /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+  /// \p VecIn.
+  MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
+                                         const SrcOp &ScalarIn,
+                                         const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_FMUL \p ScalarIn, \p VecIn
+  ///
+  /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+  /// \p VecIn.
+  MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
+                                         const SrcOp &ScalarIn,
+                                         const SrcOp &VecIn) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
+  MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
+  MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
+  }
+  /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
+  MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
+  MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_AND \p Src
+  MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_OR \p Src
+  MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
+  MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
+  MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
+  MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
+  MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
+  MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
+  }
   virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                                          ArrayRef<SrcOp> SrcOps,
                                          Optional<unsigned> Flags = None);
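[Editor's note — illustrative sketch, not part of the patch. Several of this file's changes in one place (MI, BasePtr and Vec are assumed): the MachineInstr constructor now also inherits the DebugLoc, buildGEP became buildPtrAdd, buildLoad can construct its own MachineMemOperand, and the vector reductions gained builders:]

  MachineIRBuilder B(MI); // Insert point and DebugLoc taken from MI.
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  auto Off = B.buildConstant(S64, 16);
  auto Addr = B.buildPtrAdd(P0, BasePtr, Off); // Formerly buildGEP.
  auto Val = B.buildLoad(S64, Addr, MachinePointerInfo(), Align(8));
  auto Sum = B.buildVecReduceAdd(S64, Vec);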
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index e84b1c3..da78540 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -20,6 +20,7 @@
 #include "llvm/ADT/iterator_range.h"
 #include "llvm/CodeGen/Register.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
 #include <cassert>
 #include <initializer_list>
 #include <memory>
@@ -103,36 +104,37 @@
   /// Currently the TableGen-like file would look like:
   /// \code
   /// PartialMapping[] = {
-  /// /*32-bit add*/    {0, 32, GPR}, // Scalar entry repeated for first vec elt.
-  /// /*2x32-bit add*/  {0, 32, GPR}, {32, 32, GPR},
-  /// /*<2x32-bit> vadd {0, 64, VPR}
+  /// /*32-bit add*/      {0, 32, GPR}, // Scalar entry repeated for first
+  ///                                   // vec elt.
+  /// /*2x32-bit add*/    {0, 32, GPR}, {32, 32, GPR},
+  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
   /// }; // PartialMapping duplicated.
   ///
   /// ValueMapping[] {
-  ///   /*plain 32-bit add*/ {&PartialMapping[0], 1},
+  ///   /*plain 32-bit add*/       {&PartialMapping[0], 1},
   ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
-  ///   /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+  ///   /*plain <2x32-bit> vadd*/  {&PartialMapping[3], 1}
   /// };
   /// \endcode
   ///
   /// With the array of pointer, we would have:
   /// \code
   /// PartialMapping[] = {
-  /// /*32-bit add lower */ {0, 32, GPR},
+  /// /*32-bit add lower */ { 0, 32, GPR},
   /// /*32-bit add upper */ {32, 32, GPR},
-  /// /*<2x32-bit> vadd {0, 64, VPR}
+  /// /*<2x32-bit> vadd */  { 0, 64, VPR}
   /// }; // No more duplication.
   ///
   /// BreakDowns[] = {
-  /// /*AddBreakDown*/ &PartialMapping[0],
+  /// /*AddBreakDown*/   &PartialMapping[0],
   /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
-  /// /*VAddBreakDown*/ &PartialMapping[2]
+  /// /*VAddBreakDown*/  &PartialMapping[2]
   /// }; // Addresses of PartialMapping duplicated (smaller).
   ///
   /// ValueMapping[] {
-  ///   /*plain 32-bit add*/ {&BreakDowns[0], 1},
+  ///   /*plain 32-bit add*/       {&BreakDowns[0], 1},
   ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
-  ///   /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+  ///   /*plain <2x32-bit> vadd*/  {&BreakDowns[3], 1}
   /// };
   /// \endcode
   ///
@@ -543,7 +545,7 @@
   const RegisterBank *
   getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
                             const TargetInstrInfo &TII,
-                            const TargetRegisterInfo &TRI) const;
+                            const MachineRegisterInfo &MRI) const;
 
   /// Helper method to apply something that is like the default mapping.
   /// Basically, that means that \p OpdMapper.getMI() is left untouched
@@ -599,7 +601,7 @@
   ///
   /// \todo This should be TableGen'ed.
   virtual const RegisterBank &
-  getRegBankFromRegClass(const TargetRegisterClass &RC) const {
+  getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const {
     llvm_unreachable("The target must override this method");
   }
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
deleted file mode 100644
index 4fd7043..0000000
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Types.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- llvm/CodeGen/GlobalISel/Types.h - Types used by GISel ----*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This file describes high level types that are used by several passes or
-/// APIs involved in the GlobalISel pipeline.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_GLOBALISEL_TYPES_H
-#define LLVM_CODEGEN_GLOBALISEL_TYPES_H
-
-#include "llvm/ADT/DenseMap.h"
-
-namespace llvm {
-
-class Value;
-
-/// Map a value to a virtual register.
-/// For now, we chose to map aggregate types to on single virtual
-/// register. This might be revisited if it turns out to be inefficient.
-/// PR26161 tracks that.
-/// Note: We need to expose this type to the target hooks for thing like
-/// ABI lowering that would be used during IRTranslation.
-using ValueToVReg = DenseMap<const Value *, unsigned>;
-
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_GLOBALISEL_TYPES_H
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
index 6946aad..9bd5180 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -16,23 +16,28 @@
 
 #include "llvm/ADT/StringRef.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include <cstdint>
 
 namespace llvm {
 
 class AnalysisUsage;
+class GISelKnownBits;
 class MachineFunction;
 class MachineInstr;
 class MachineOperand;
 class MachineOptimizationRemarkEmitter;
 class MachineOptimizationRemarkMissed;
+struct MachinePointerInfo;
 class MachineRegisterInfo;
 class MCInstrDesc;
 class RegisterBankInfo;
 class TargetInstrInfo;
+class TargetLowering;
 class TargetPassConfig;
 class TargetRegisterInfo;
 class TargetRegisterClass;
-class Twine;
 class ConstantFP;
 class APFloat;
 
@@ -40,9 +45,9 @@
 /// create a new virtual register in the correct class.
 ///
 /// \return The virtual register constrained to the right register class.
-unsigned constrainRegToClass(MachineRegisterInfo &MRI,
+Register constrainRegToClass(MachineRegisterInfo &MRI,
                              const TargetInstrInfo &TII,
-                             const RegisterBankInfo &RBI, unsigned Reg,
+                             const RegisterBankInfo &RBI, Register Reg,
                              const TargetRegisterClass &RegClass);
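
A minimal usage sketch of the retyped helper, assuming the usual GlobalISel includes; the wrapper and its parameter names are ours, not the header's. Register preserves the old unsigned encoding while adding queries such as isVirtual():

\code
static Register constrainDefToClass(MachineRegisterInfo &MRI,
                                    const TargetInstrInfo &TII,
                                    const RegisterBankInfo &RBI,
                                    MachineInstr &MI,
                                    const TargetRegisterClass &RC) {
  // getReg() already returns a Register, so no unsigned round-trip is left.
  Register DstReg = MI.getOperand(0).getReg();
  return constrainRegToClass(MRI, TII, RBI, DstReg, RC);
}
\endcode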
 
 /// Constrain the Register operand OpIdx, so that it is now constrained to the
@@ -52,14 +57,14 @@
 /// definition. The debug location of \p InsertPt is used for the new copy.
 ///
 /// \return The virtual register constrained to the right register class.
-unsigned constrainOperandRegClass(const MachineFunction &MF,
+Register constrainOperandRegClass(const MachineFunction &MF,
                                   const TargetRegisterInfo &TRI,
                                   MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI,
                                   MachineInstr &InsertPt,
                                   const TargetRegisterClass &RegClass,
-                                  const MachineOperand &RegMO, unsigned OpIdx);
+                                  const MachineOperand &RegMO);
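
With OpIdx gone, the operand itself identifies what to constrain. A sketch of the new call shape, again with invented wrapper names; writing the result back through setReg mirrors how selectors typically consume the returned register:

\code
static void constrainUseOperand(const MachineFunction &MF,
                                const TargetRegisterInfo &TRI,
                                MachineRegisterInfo &MRI,
                                const TargetInstrInfo &TII,
                                const RegisterBankInfo &RBI,
                                MachineInstr &InsertPt,
                                const TargetRegisterClass &RC, unsigned Idx) {
  MachineOperand &RegMO = InsertPt.getOperand(Idx);
  RegMO.setReg(
      constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, RC, RegMO));
}
\endcode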
 
 /// Try to constrain Reg so that it is usable by argument OpIdx of the
 /// provided MCInstrDesc \p II. If this fails, create a new virtual
@@ -70,7 +75,7 @@
 /// InsertPt is used for the new copy.
 ///
 /// \return The virtual register constrained to the right register class.
-unsigned constrainOperandRegClass(const MachineFunction &MF,
+Register constrainOperandRegClass(const MachineFunction &MF,
                                   const TargetRegisterInfo &TRI,
                                   MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
@@ -91,6 +96,11 @@
                                       const TargetInstrInfo &TII,
                                       const TargetRegisterInfo &TRI,
                                       const RegisterBankInfo &RBI);
+
+/// Check if DstReg can be replaced with SrcReg depending on the register
+/// constraints.
+bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);
+
 /// Check whether an instruction \p MI is dead: it only defines dead virtual
 /// registers, and doesn't have other side effects.
 bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
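
A sketch of how a combine might pair the new canReplaceReg with isTriviallyDead: rewrite the uses only when the register constraints permit it, then erase the dead definition. The wrapper is illustrative.

\code
static bool replaceIfAllowed(MachineInstr &MI, Register DstReg,
                             Register SrcReg, MachineRegisterInfo &MRI) {
  if (!canReplaceReg(DstReg, SrcReg, MRI))
    return false;
  MRI.replaceRegWith(DstReg, SrcReg); // Rewrite every use of DstReg.
  if (isTriviallyDead(MI, MRI))
    MI.eraseFromParent();             // Definition is no longer needed.
  return true;
}
\endcode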
@@ -106,26 +116,39 @@
                         const char *PassName, StringRef Msg,
                         const MachineInstr &MI);
 
+/// Report an ISel warning as a missed optimization remark to the LLVMContext's
+/// diagnostic stream.
+void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
+                        MachineOptimizationRemarkEmitter &MORE,
+                        MachineOptimizationRemarkMissed &R);
+
+/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
+Optional<APInt> getConstantVRegVal(Register VReg,
+                                   const MachineRegisterInfo &MRI);
+
 /// If \p VReg is defined by a G_CONSTANT fits in int64_t
 /// returns it.
-Optional<int64_t> getConstantVRegVal(unsigned VReg,
-                                     const MachineRegisterInfo &MRI);
+Optional<int64_t> getConstantVRegSExtVal(Register VReg,
+                                         const MachineRegisterInfo &MRI);
+
 /// Simple struct used to hold a constant integer value and a virtual
 /// register.
 struct ValueAndVReg {
-  int64_t Value;
-  unsigned VReg;
+  APInt Value;
+  Register VReg;
 };
 /// If \p VReg is defined by a statically evaluable chain of
-/// instructions rooted on a G_CONSTANT (\p LookThroughInstrs == true)
-/// and that constant fits in int64_t, returns its value as well as
-/// the virtual register defined by this G_CONSTANT.
-/// When \p LookThroughInstrs == false, this function behaves like
+/// instructions rooted on a G_CONSTANT or G_FCONSTANT
+/// (\p LookThroughInstrs == true) and that constant fits in int64_t, returns
+/// its value as well as the virtual register defined by this G_CONSTANT or
+/// G_FCONSTANT.
+/// When \p LookThroughInstrs == false, this function behaves like
 /// getConstantVRegVal.
+/// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
 Optional<ValueAndVReg>
-getConstantVRegValWithLookThrough(unsigned VReg, const MachineRegisterInfo &MRI,
-                                  bool LookThroughInstrs = true);
-const ConstantFP* getConstantFPVRegVal(unsigned VReg,
+getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
+                                  bool LookThroughInstrs = true,
+                                  bool HandleFConstants = true);
+const ConstantFP* getConstantFPVRegVal(Register VReg,
                                        const MachineRegisterInfo &MRI);
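
A sketch of the matching flow with the APInt-based result; the mask-matching framing is invented, only the helper calls come from this header:

\code
static bool matchConstantMask(Register Src, const MachineRegisterInfo &MRI,
                              APInt &MaskOut) {
  // Looks through COPY/G_TRUNC/G_SEXT/G_ZEXT chains down to the constant;
  // pass LookThroughInstrs = false to accept only a direct G_CONSTANT.
  Optional<ValueAndVReg> VRegAndVal =
      getConstantVRegValWithLookThrough(Src, MRI);
  if (!VRegAndVal)
    return false;
  MaskOut = VRegAndVal->Value; // Now an APInt rather than a raw int64_t.
  return true;
}
\endcode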
 
 /// See if Reg is defined by an single def instruction that is
@@ -134,12 +157,30 @@
 MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                            const MachineRegisterInfo &MRI);
 
-/// Find the def instruction for \p Reg, folding away any trivial copies. Note
-/// it may still return a COPY, if it changes the type. May return nullptr if \p
-/// Reg is not a generic virtual register.
+/// Simple struct used to hold a Register value and the instruction which
+/// defines it.
+struct DefinitionAndSourceRegister {
+  MachineInstr *MI;
+  Register Reg;
+};
+
+/// Find the def instruction for \p Reg, and the underlying source Register,
+/// folding away any copies.
+Optional<DefinitionAndSourceRegister>
+getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
+
+/// Find the def instruction for \p Reg, folding away any trivial copies. May
+/// return nullptr if \p Reg is not a generic virtual register.
 MachineInstr *getDefIgnoringCopies(Register Reg,
                                    const MachineRegisterInfo &MRI);
 
+/// Find the source register for \p Reg, folding away any trivial copies. It
+/// will be an output register of the instruction that getDefIgnoringCopies
+/// returns. May return an invalid register if \p Reg is not a generic virtual
+/// register.
+Register getSrcRegIgnoringCopies(Register Reg,
+                                 const MachineRegisterInfo &MRI);
+
 /// Returns an APFloat from Val converted to the appropriate size.
 APFloat getAPFloatFromSize(double Val, unsigned Size);
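
The new struct and its lookup helper pair naturally in matchers; a hedged sketch (the G_FNEG framing is ours):

\code
static bool isFNegBehindCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrc =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  // DefSrc->MI is the real defining instruction, DefSrc->Reg the register
  // it defines before any intervening copies.
  return DefSrc && DefSrc->MI->getOpcode() == TargetOpcode::G_FNEG;
}
\endcode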
 
@@ -147,8 +188,90 @@
 /// fallback.
 void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
 
-Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
-                                  const unsigned Op2,
+Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
+                                  const Register Op2,
                                   const MachineRegisterInfo &MRI);
+
+Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
+                                  uint64_t Imm, const MachineRegisterInfo &MRI);
+
+/// Test if the given value is known to have exactly one bit set. This differs
+/// from computeKnownBits in that it doesn't necessarily determine which bit is
+/// set.
+bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
+                            GISelKnownBits *KnownBits = nullptr);
+
+/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
+/// this returns if \p Val can be assumed to never be a signaling NaN.
+bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
+                     bool SNaN = false);
+
+/// Returns true if \p Val can be assumed to never be a signaling NaN.
+inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
+  return isKnownNeverNaN(Val, MRI, true);
+}
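
One plausible consumer of these predicates, sketched with an invented combine framing: an FP min/max lowering only needs to quiet inputs that may be signaling NaNs.

\code
static bool needsQuieting(Register LHS, Register RHS,
                          const MachineRegisterInfo &MRI) {
  // If neither input can be an sNaN, the canonicalization step can be
  // skipped entirely.
  return !isKnownNeverSNaN(LHS, MRI) || !isKnownNeverSNaN(RHS, MRI);
}
\endcode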
+
+Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
+
+/// Return a virtual register corresponding to the incoming argument register
+/// \p PhysReg. This register is expected to have class \p RC, and optional
+/// type \p RegTy. This assumes all references to the register will use the
+/// same type.
+///
+/// If there is an existing live-in argument register, it will be returned.
+/// This will also ensure there is a valid copy out of \p PhysReg.
+Register getFunctionLiveInPhysReg(MachineFunction &MF,
+                                  const TargetInstrInfo &TII,
+                                  MCRegister PhysReg,
+                                  const TargetRegisterClass &RC,
+                                  LLT RegTy = LLT());
+
+/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
+/// changing the number of vector elements or scalar bitwidth. The intent is a
+/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
+/// \p OrigTy elements, and unmerged into \p TargetTy.
+LLVM_READNONE
+LLT getLCMType(LLT OrigTy, LLT TargetTy);
+
+/// Return a type where the total size is the greatest common divisor of \p
+/// OrigTy and \p TargetTy. This will try to either change the number of vector
+/// elements, or bitwidth of scalars. The intent is the result type can be used
+/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
+/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
+/// with intermediate casts) can re-form \p TargetTy.
+///
+/// If these are vectors with different element types, this will try to produce
+/// a vector with a compatible total size, but the element type of \p OrigTy. If
+/// this can't be satisfied, this will produce a scalar smaller than the
+/// original vector elements.
+///
+/// In the worst case, this returns LLT::scalar(1).
+LLVM_READNONE
+LLT getGCDType(LLT OrigTy, LLT TargetTy);
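
A hedged sketch of how a legalizer-style caller tends to pair the two queries; chooseMergeTypes is our name, and no concrete LLT results are asserted:

\code
static std::pair<LLT, LLT> chooseMergeTypes(LLT DstTy, LLT SrcTy) {
  LLT LCMTy = getLCMType(DstTy, SrcTy); // Type both can be merged up into.
  LLT GCDTy = getGCDType(DstTy, LCMTy); // Piece size that unmerges evenly
                                        // from both.
  return {LCMTy, GCDTy};
}
\endcode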
+
+/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
+/// If \p MI is not a splat, returns None.
+Optional<int> getSplatIndex(MachineInstr &MI);
+
+/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
+Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
+                                              const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
+bool isBuildVectorAllZeros(const MachineInstr &MI,
+                           const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
+bool isBuildVectorAllOnes(const MachineInstr &MI,
+                          const MachineRegisterInfo &MRI);
+
+/// Returns true if given the TargetLowering's boolean contents information,
+/// the value \p Val contains a true value.
+bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
+                    bool IsFP);
+
+/// Returns an integer representing true, as defined by the
+/// TargetBooleanContents.
+int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
 } // End namespace llvm.
 #endif
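
Closing with a hedged sketch that ties several of the new helpers together: testing whether the condition of a G_SELECT is a known-true constant under the target's boolean convention. The combine framing is invented; the helper calls are the ones declared above.

\code
static bool selectCondIsTrue(const MachineInstr &Sel,
                             const MachineRegisterInfo &MRI,
                             const TargetLowering &TLI) {
  Register Cond = Sel.getOperand(1).getReg(); // G_SELECT condition operand.
  LLT CondTy = MRI.getType(Cond);
  if (const MachineInstr *Def = MRI.getVRegDef(Cond))
    if (Optional<int64_t> Splat = getBuildVectorConstantSplat(*Def, MRI))
      return isConstTrueVal(TLI, *Splat, /*IsVector=*/true, /*IsFP=*/false);
  if (Optional<int64_t> Cst = getConstantVRegSExtVal(Cond, MRI))
    return isConstTrueVal(TLI, *Cst, CondTy.isVector(), /*IsFP=*/false);
  return false;
}
\endcode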