Update prebuilt Clang to r365631c1 from Android.

The version we had was segfaulting.

Bug: 132420445
Change-Id: Icb45a6fe0b4e2166f7895e669df1157cec9fb4e0
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
index 97f3cd5..5a44e67 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -13,6 +13,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
 
 #include "llvm/ADT/FoldingSet.h"
+#include "llvm/CodeGen/CSEConfigBase.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
 #include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
@@ -36,25 +37,27 @@
   void Profile(FoldingSetNodeID &ID);
 };
 
-// Class representing some configuration that can be done during CSE analysis.
-// Currently it only supports shouldCSE method that each pass can set.
-class CSEConfig {
+// A CSE config for fully optimized builds.
+class CSEConfigFull : public CSEConfigBase {
 public:
-  virtual ~CSEConfig() = default;
-  // Hook for defining which Generic instructions should be CSEd.
-  // GISelCSEInfo currently only calls this hook when dealing with generic
-  // opcodes.
-  virtual bool shouldCSEOpc(unsigned Opc);
+  virtual ~CSEConfigFull() = default;
+  virtual bool shouldCSEOpc(unsigned Opc) override;
 };
 
-// TODO: Find a better place for this.
 // Commonly used for O0 config.
-class CSEConfigConstantOnly : public CSEConfig {
+class CSEConfigConstantOnly : public CSEConfigBase {
 public:
   virtual ~CSEConfigConstantOnly() = default;
   virtual bool shouldCSEOpc(unsigned Opc) override;
 };
 
+// Returns the standard expected CSEConfig for the given optimization level.
+// We have this logic here so targets can make use of it from their derived
+// TargetPassConfig, but can't put this logic into TargetPassConfig directly
+// because the CodeGen library can't depend on GlobalISel.
+std::unique_ptr<CSEConfigBase>
+getStandardCSEConfigForOpt(CodeGenOpt::Level Level);
+
 /// The CSE Analysis object.
 /// This installs itself as a delegate to the MachineFunction to track
 /// new instructions as well as deletions. It however will not be able to
@@ -73,7 +76,7 @@
   FoldingSet<UniqueMachineInstr> CSEMap;
   MachineRegisterInfo *MRI = nullptr;
   MachineFunction *MF = nullptr;
-  std::unique_ptr<CSEConfig> CSEOpt;
+  std::unique_ptr<CSEConfigBase> CSEOpt;
   /// Keep a cache of UniqueInstrs for each MachineInstr. In GISel,
   /// often instructions are mutated (while their ID has completely changed).
   /// Whenever mutation happens, invalidate the UniqueMachineInstr for the
@@ -138,7 +141,9 @@
 
   void releaseMemory();
 
-  void setCSEConfig(std::unique_ptr<CSEConfig> Opt) { CSEOpt = std::move(Opt); }
+  void setCSEConfig(std::unique_ptr<CSEConfigBase> Opt) {
+    CSEOpt = std::move(Opt);
+  }
 
   bool shouldCSE(unsigned Opc) const;
 
@@ -198,11 +203,12 @@
   bool AlreadyComputed = false;
 
 public:
-  /// Takes a CSEConfig object that defines what opcodes get CSEd.
+  /// Takes a CSEConfigBase object that defines what opcodes get CSEd.
   /// If CSEConfig is already set, and the CSE Analysis has been preserved,
   /// it will not use the new CSEOpt (use ReCompute to force using the new
   /// CSEOpt).
-  GISelCSEInfo &get(std::unique_ptr<CSEConfig> CSEOpt, bool ReCompute = false);
+  GISelCSEInfo &get(std::unique_ptr<CSEConfigBase> CSEOpt,
+                    bool ReCompute = false);
   void setMF(MachineFunction &MFunc) { MF = &MFunc; }
   void setComputed(bool Computed) { AlreadyComputed = Computed; }
   void releaseMemory() { Info.releaseMemory(); }
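
For illustration, the new hook lets a target's pass config pick the standard
per-level behavior: constants-only CSE at CodeGenOpt::None, full CSE otherwise.
A minimal sketch (MyTargetPassConfig and its TM member are assumptions, not
part of this header):

  std::unique_ptr<CSEConfigBase> MyTargetPassConfig::getCSEConfig() const {
    // CSEConfigConstantOnly at -O0, CSEConfigFull at higher levels.
    return getStandardCSEConfigForOpt(TM->getOptLevel());
  }
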
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 9b72b70..d8d15bd 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -15,6 +15,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/TargetCallingConv.h"
 #include "llvm/IR/CallSite.h"
@@ -42,14 +43,19 @@
   virtual void anchor();
 public:
   struct ArgInfo {
-    unsigned Reg;
+    SmallVector<Register, 4> Regs;
     Type *Ty;
     ISD::ArgFlagsTy Flags;
     bool IsFixed;
 
-    ArgInfo(unsigned Reg, Type *Ty, ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{},
-            bool IsFixed = true)
-        : Reg(Reg), Ty(Ty), Flags(Flags), IsFixed(IsFixed) {}
+    ArgInfo(ArrayRef<Register> Regs, Type *Ty,
+            ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy{}, bool IsFixed = true)
+        : Regs(Regs.begin(), Regs.end()), Ty(Ty), Flags(Flags),
+          IsFixed(IsFixed) {
+      // FIXME: We should have just one way of saying "no register".
+      assert((Ty->isVoidTy() == (Regs.empty() || Regs[0] == 0)) &&
+             "only void types should have no register");
+    }
   };
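
As the new Regs field suggests, one IR argument may now span several virtual
registers. A construction sketch (ScalarVReg, LoVReg, HiVReg, Int32Ty, and
StructTy are placeholders):

  // A scalar argument lives in a single vreg...
  CallLowering::ArgInfo ScalarArg({ScalarVReg}, Int32Ty);
  // ...while an aggregate gets one vreg per base type, in the order
  // computeValueLLTs returns them.
  CallLowering::ArgInfo AggArg({LoVReg, HiVReg}, StructTy);
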
 
   /// Argument handling is mostly uniform between the four places that
@@ -65,24 +71,28 @@
 
     virtual ~ValueHandler() = default;
 
+    /// Returns true if the handler is dealing with formal arguments,
+    /// not with return values etc.
+    virtual bool isArgumentHandler() const { return false; }
+
     /// Materialize a VReg containing the address of the specified
     /// stack-based object. This is either based on a FrameIndex or
     /// direct SP manipulation, depending on the context. \p MPO
     /// should be initialized to an appropriate description of the
     /// address created.
-    virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+    virtual Register getStackAddress(uint64_t Size, int64_t Offset,
                                      MachinePointerInfo &MPO) = 0;
 
     /// The specified value has been assigned to a physical register,
     /// handle the appropriate COPY (either to or from) and mark any
     /// relevant uses/defines as needed.
-    virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+    virtual void assignValueToReg(Register ValVReg, Register PhysReg,
                                   CCValAssign &VA) = 0;
 
     /// The specified value has been assigned to a stack
     /// location. Load or store it there, with appropriate extension
     /// if necessary.
-    virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+    virtual void assignValueToAddress(Register ValVReg, Register Addr,
                                       uint64_t Size, MachinePointerInfo &MPO,
                                       CCValAssign &VA) = 0;
 
@@ -97,7 +107,7 @@
       llvm_unreachable("Custom values not supported");
     }
 
-    unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+    Register extendRegister(Register ValReg, CCValAssign &VA);
 
     virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
@@ -129,6 +139,24 @@
   void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                    const FuncInfoTy &FuncInfo) const;
 
+  /// Generate instructions for packing \p SrcRegs into one big register
+  /// corresponding to the aggregate type \p PackedTy.
+  ///
+  /// \param SrcRegs should contain one virtual register for each base type in
+  ///                \p PackedTy, as returned by computeValueLLTs.
+  ///
+  /// \return The packed register.
+  Register packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
+                    MachineIRBuilder &MIRBuilder) const;
+
+  /// Generate instructions for unpacking \p SrcReg into the \p DstRegs
+  /// corresponding to the aggregate type \p PackedTy.
+  ///
+  /// \param DstRegs should contain one virtual register for each base type in
+  ///        \p PackedTy, as returned by computeValueLLTs.
+  void unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg, Type *PackedTy,
+                  MachineIRBuilder &MIRBuilder) const;
+
   /// Invoke Handler::assignArg on each of the given \p Args and then use
   /// \p Callback to move them to the assigned locations.
   ///
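
A sketch of how the two helpers pair up (SrcRegs, DstRegs, PackedTy, and
MIRBuilder are assumed to be in scope, with one vreg per base type of PackedTy
as described above):

  // Pack the flattened pieces of an aggregate into one wide scalar vreg...
  Register Packed = packRegs(SrcRegs, PackedTy, MIRBuilder);
  // ...and later split such a scalar back into its per-base-type vregs.
  unpackRegs(DstRegs, Packed, PackedTy, MIRBuilder);
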
@@ -140,28 +168,51 @@
   CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
   virtual ~CallLowering() = default;
 
+  /// \return true if the target is capable of handling swifterror values that
+  /// have been promoted to a specified register. The extended versions of
+  /// lowerReturn and lowerCall should be implemented.
+  virtual bool supportSwiftError() const {
+    return false;
+  }
+
   /// This hook must be implemented to lower outgoing return values, described
   /// by \p Val, into the specified virtual registers \p VRegs.
   /// This hook is used by GlobalISel.
   ///
+  /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
+  /// that needs to be implicitly returned.
+  ///
   /// \return True if the lowering succeeds, false otherwise.
   virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
-                           ArrayRef<unsigned> VRegs) const {
+                           ArrayRef<Register> VRegs,
+                           Register SwiftErrorVReg) const {
+    if (!supportSwiftError()) {
+      assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
+      return lowerReturn(MIRBuilder, Val, VRegs);
+    }
+    return false;
+  }
+
+  /// This hook behaves as the extended lowerReturn function, but for targets
+  /// that do not support swifterror value promotion.
+  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
+                           ArrayRef<Register> VRegs) const {
     return false;
   }
 
   /// This hook must be implemented to lower the incoming (formal)
-  /// arguments, described by \p Args, for GlobalISel. Each argument
-  /// must end up in the related virtual register described by VRegs.
-  /// In other words, the first argument should end up in VRegs[0],
-  /// the second in VRegs[1], and so on.
+  /// arguments, described by \p VRegs, for GlobalISel. Each argument
+  /// must end up in the related virtual registers described by \p VRegs.
+  /// In other words, the first argument should end up in \c VRegs[0],
+  /// the second in \c VRegs[1], and so on. For each argument, there will be one
+  /// register for each non-aggregate type, as returned by \c computeValueLLTs.
   /// \p MIRBuilder is set to the proper insertion for the argument
   /// lowering.
   ///
   /// \return True if the lowering succeeded, false otherwise.
   virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                     const Function &F,
-                                    ArrayRef<unsigned> VRegs) const {
+                                    ArrayRef<ArrayRef<Register>> VRegs) const {
     return false;
   }
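
A target opting into swifterror support overrides supportSwiftError() and the
extended lowerReturn above. A sketch, where MyTargetCallLowering,
SwiftErrorPhysReg, and lowerReturnImpl are hypothetical:

  bool MyTargetCallLowering::supportSwiftError() const { return true; }

  bool MyTargetCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                         const Value *Val,
                                         ArrayRef<Register> VRegs,
                                         Register SwiftErrorVReg) const {
    if (SwiftErrorVReg)
      // Copy the promoted swifterror value into the register the caller
      // expects it in (SwiftErrorPhysReg stands in for the target's choice).
      MIRBuilder.buildCopy(SwiftErrorPhysReg, SwiftErrorVReg);
    return lowerReturnImpl(MIRBuilder, Val, VRegs); // the usual lowering
  }
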
 
@@ -173,20 +224,31 @@
   /// \p Callee is the destination of the call. It should be either a register,
   /// globaladdress, or externalsymbol.
   ///
-  /// \p ResTy is the type returned by the function
+  /// \p OrigRet is a descriptor for the return type of the function.
   ///
-  /// \p ResReg is the generic virtual register that the returned
-  /// value should be lowered into.
+  /// \p OrigArgs is a list of descriptors of the arguments passed to the
+  /// function.
   ///
-  /// \p ArgTys is a list of the types each member of \p ArgRegs has; used by
-  /// the target to decide which register/stack slot should be allocated.
-  ///
-  /// \p ArgRegs is a list of virtual registers containing each argument that
-  /// needs to be passed.
+  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
+  /// parameter, and contains the vreg that the swifterror should be copied into
+  /// after the call.
   ///
   /// \return true if the lowering succeeded, false otherwise.
   virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
                          const MachineOperand &Callee, const ArgInfo &OrigRet,
+                         ArrayRef<ArgInfo> OrigArgs,
+                         Register SwiftErrorVReg) const {
+    if (!supportSwiftError()) {
+      assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
+      return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
+    }
+    return false;
+  }
+
+  /// This hook behaves as the extended lowerCall function, but for targets that
+  /// do not support swifterror value promotion.
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
+                         const MachineOperand &Callee, const ArgInfo &OrigRet,
                          ArrayRef<ArgInfo> OrigArgs) const {
     return false;
   }
@@ -196,11 +258,18 @@
   ///
   /// \p CI is the call/invoke instruction.
   ///
-  /// \p ResReg is a register where the call's return value should be stored (or
-  /// 0 if there is no return value).
+  /// \p ResRegs are the registers where the call's return value should be
+  /// stored (or empty if there is no return value). There will be one
+  /// register for each non-aggregate type, as returned by \c computeValueLLTs.
   ///
-  /// \p ArgRegs is a list of virtual registers containing each argument that
-  /// needs to be passed.
+  /// \p ArgRegs is a list of lists of virtual registers containing each
+  /// argument that needs to be passed (argument \c i should be placed in \c
+  /// ArgRegs[i]). For each argument, there will be one register for each
+  /// non-aggregate type, as returned by \c computeValueLLTs.
+  ///
+  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
+  /// parameter, and contains the vreg that the swifterror should be copied into
+  /// after the call.
   ///
   /// \p GetCalleeReg is a callback to materialize a register for the callee if
   /// the target determines it cannot jump to the destination based purely on \p
@@ -209,7 +278,8 @@
   ///
   /// \return true if the lowering succeeded, false otherwise.
   bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
-                 unsigned ResReg, ArrayRef<unsigned> ArgRegs,
+                 ArrayRef<Register> ResRegs,
+                 ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                  std::function<unsigned()> GetCalleeReg) const;
 };
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index ee30ba9..0c50c9c 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -17,6 +17,9 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
 #define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
 
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/Register.h"
+
 namespace llvm {
 
 class GISelChangeObserver;
@@ -25,6 +28,12 @@
 class MachineInstr;
 class MachineOperand;
 
+struct PreferredTuple {
+  LLT Ty;                // The result type of the extend.
+  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
+  MachineInstr *MI;
+};
+
 class CombinerHelper {
   MachineIRBuilder &Builder;
   MachineRegisterInfo &MRI;
@@ -34,20 +43,27 @@
   CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B);
 
   /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
-  void replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg, unsigned ToReg) const;
+  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
 
   /// Replace a single register operand with a new register and inform the
   /// observer of the changes.
   void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
-                        unsigned ToReg) const;
+                        Register ToReg) const;
 
   /// If \p MI is a COPY, try to combine it.
   /// Returns true if MI changed.
   bool tryCombineCopy(MachineInstr &MI);
+  bool matchCombineCopy(MachineInstr &MI);
+  void applyCombineCopy(MachineInstr &MI);
 
   /// If \p MI is an extend that consumes the result of a load, try to combine it.
   /// Returns true if MI changed.
   bool tryCombineExtendingLoads(MachineInstr &MI);
+  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+
+  bool matchCombineBr(MachineInstr &MI);
+  bool tryCombineBr(MachineInstr &MI);
 
   /// Try to transform \p MI by using all of the above
   /// combine functions. Returns true if changed.
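
The match/apply split lets a combiner check for a pattern without touching the
MIR, then commit the rewrite separately. A usage sketch (Helper and MI assumed
in scope):

  PreferredTuple MatchInfo;
  if (Helper.matchCombineExtendingLoads(MI, MatchInfo))
    // Nothing has been mutated yet; apply the rewrite only on a full match.
    Helper.applyCombineExtendingLoads(MI, MatchInfo);
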
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
index 626a666..b0bb519 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/GISelWorkList.h
@@ -32,6 +32,10 @@
   SmallVector<MachineInstr *, N> Worklist;
   DenseMap<MachineInstr *, unsigned> WorklistMap;
 
+#ifndef NDEBUG
+  bool Finalized = true;
+#endif
+
 public:
   GISelWorkList() : WorklistMap(N) {}
 
@@ -39,16 +43,50 @@
 
   unsigned size() const { return WorklistMap.size(); }
 
+  // Since we don't know ahead of time how many instructions we're going to
+  // add to the worklist, and migrating the DenseMap's elements every time we
+  // resize is quite expensive, only insert into the SmallVector (typically
+  // during the initial phase of populating lists). Before the worklist can
+  // be used, finalize() must be called. Asserts (in builds with assertions
+  // enabled) if the list is ever used without finalizing. Note that unlike
+  // insert, this won't check for duplicates - so the ideal place to use it
+  // is during the initial prepopulating phase of most passes.
+  void deferred_insert(MachineInstr *I) {
+    Worklist.push_back(I);
+#ifndef NDEBUG
+    Finalized = false;
+#endif
+  }
+
+  // This should only be called when using deferred_insert.
+  // This asserts that the WorklistMap is empty, and then
+  // inserts all the elements in the Worklist into the map.
+  // It also asserts if there are any duplicate elements found.
+  void finalize() {
+    assert(WorklistMap.empty() && "Expecting empty worklistmap");
+    if (Worklist.size() > N)
+      WorklistMap.reserve(Worklist.size());
+    for (unsigned i = 0; i < Worklist.size(); ++i)
+      if (!WorklistMap.try_emplace(Worklist[i], i).second)
+        llvm_unreachable("Duplicate elements in the list");
+#ifndef NDEBUG
+    Finalized = true;
+#endif
+  }
+
   /// Add the specified instruction to the worklist if it isn't already in it.
   void insert(MachineInstr *I) {
+    assert(Finalized && "GISelWorkList used without finalizing");
     if (WorklistMap.try_emplace(I, Worklist.size()).second)
       Worklist.push_back(I);
   }
 
   /// Remove I from the worklist if it exists.
   void remove(const MachineInstr *I) {
+    assert((Finalized || WorklistMap.empty()) && "Neither finalized nor empty");
     auto It = WorklistMap.find(I);
-    if (It == WorklistMap.end()) return; // Not in worklist.
+    if (It == WorklistMap.end())
+      return; // Not in worklist.
 
     // Don't bother moving everything down, just null out the slot.
     Worklist[It->second] = nullptr;
@@ -62,6 +100,7 @@
   }
 
   MachineInstr *pop_back_val() {
+    assert(Finalized && "GISelWorkList used without finalizing");
     MachineInstr *I;
     do {
       I = Worklist.pop_back_val();
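
A sketch of the intended two-phase usage (MF and process() are placeholders,
and the empty()/pop_back_val() loop is the usual worklist idiom):

  GISelWorkList<512> WL;
  for (MachineBasicBlock &MBB : MF)
    for (MachineInstr &MI : MBB)
      WL.deferred_insert(&MI); // cheap: appends to the vector only
  WL.finalize();               // build the map once; rejects duplicates
  while (!WL.empty())
    process(*WL.pop_back_val());
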
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index c75d823..8654ba8 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -22,7 +22,9 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Types.h"
+#include "llvm/CodeGen/SwiftErrorValueTracking.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SwitchLoweringUtils.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/Support/Allocator.h"
 #include <memory>
@@ -36,6 +38,7 @@
 class CallLowering;
 class Constant;
 class DataLayout;
+class FunctionLoweringInfo;
 class Instruction;
 class MachineBasicBlock;
 class MachineFunction;
@@ -68,7 +71,7 @@
   public:
     ValueToVRegInfo() = default;
 
-    using VRegListT = SmallVector<unsigned, 1>;
+    using VRegListT = SmallVector<Register, 1>;
     using OffsetListT = SmallVector<uint64_t, 1>;
 
     using const_vreg_iterator =
@@ -163,6 +166,8 @@
   /// this function.
   DenseMap<const AllocaInst *, int> FrameIndices;
 
+  SwiftErrorValueTracking SwiftError;
+
   /// \name Methods for translating from LLVM IR to MachineInstr.
   /// \see ::translate for general information on the translate methods.
   /// @{
@@ -195,7 +200,7 @@
   /// the function.
   ///
   /// \return true if the materialization succeeded.
-  bool translate(const Constant &C, unsigned Reg);
+  bool translate(const Constant &C, Register Reg);
 
   /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
   /// emitted.
@@ -211,7 +216,7 @@
   bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                         unsigned ID);
 
-  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
 
   bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                   MachineIRBuilder &MIRBuilder);
@@ -232,14 +237,6 @@
 
   bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
 
-  // FIXME: temporary function to expose previous interface to call lowering
-  // until it is refactored.
-  /// Combines all component registers of \p V into a single scalar with size
-  /// "max(Offsets) + last size".
-  unsigned packRegs(const Value &V, MachineIRBuilder &MIRBuilder);
-
-  void unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder);
-
   /// Returns true if the value should be split into multiple LLTs.
   /// If \p Offsets is given then the split type's offsets will be stored in it.
   /// If \p Offsets is not empty it will be cleared first.
@@ -290,7 +287,42 @@
   /// \pre \p U is a branch instruction.
   bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
 
+  // Begin switch lowering functions.
+  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
+                           SwitchCG::JumpTableHeader &JTH,
+                           MachineBasicBlock *HeaderBB);
+  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
+
+  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
+                      MachineIRBuilder &MIB);
+
+  bool lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
+                              MachineBasicBlock *SwitchMBB,
+                              MachineBasicBlock *CurMBB,
+                              MachineBasicBlock *DefaultMBB,
+                              MachineIRBuilder &MIB,
+                              MachineFunction::iterator BBI,
+                              BranchProbability UnhandledProbs,
+                              SwitchCG::CaseClusterIt I,
+                              MachineBasicBlock *Fallthrough,
+                              bool FallthroughUnreachable);
+
+  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
+                                Value *Cond,
+                                MachineBasicBlock *Fallthrough,
+                                bool FallthroughUnreachable,
+                                BranchProbability UnhandledProbs,
+                                MachineBasicBlock *CurMBB,
+                                MachineIRBuilder &MIB,
+                                MachineBasicBlock *SwitchMBB);
+
+  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
+                           MachineBasicBlock *SwitchMBB,
+                           MachineBasicBlock *DefaultMBB,
+                           MachineIRBuilder &MIB);
+
   bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
+  // End switch lowering section.
 
   bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
 
@@ -416,6 +448,7 @@
 
   bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
   bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
+  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
 
   // Stubs to keep the compiler happy while we implement the rest of the
   // translation.
@@ -431,9 +464,6 @@
   bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
     return false;
   }
-  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder) {
-    return false;
-  }
   bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
     return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
   }
@@ -478,19 +508,50 @@
   /// Current optimization remark emitter. Used to report failures.
   std::unique_ptr<OptimizationRemarkEmitter> ORE;
 
+  FunctionLoweringInfo FuncInfo;
+
+  // True unless the TargetMachine specifies no optimizations or the function
+  // has the optnone attribute.
+  bool EnableOpts = false;
+
+  /// Switch analysis and optimization.
+  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
+  public:
+    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
+        : SwitchLowering(funcinfo), IRT(irt) {
+      assert(irt && "irt is null!");
+    }
+
+    virtual void addSuccessorWithProb(
+        MachineBasicBlock *Src, MachineBasicBlock *Dst,
+        BranchProbability Prob = BranchProbability::getUnknown()) override {
+      IRT->addSuccessorWithProb(Src, Dst, Prob);
+    }
+
+    virtual ~GISelSwitchLowering() = default;
+
+  private:
+    IRTranslator *IRT;
+  };
+
+  std::unique_ptr<GISelSwitchLowering> SL;
+
   // * Insert all the code needed to materialize the constants
   // at the proper place. E.g., Entry block or dominator block
   // of each constant depending on how fancy we want to be.
   // * Clear the different maps.
   void finalizeFunction();
 
+  // Handle emitting jump tables for each basic block.
+  void finalizeBasicBlock();
+
   /// Get the VRegs that represent \p Val.
   /// Non-aggregate types have just one corresponding VReg and the list can be
   /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
   /// not exist, they are created.
-  ArrayRef<unsigned> getOrCreateVRegs(const Value &Val);
+  ArrayRef<Register> getOrCreateVRegs(const Value &Val);
 
-  unsigned getOrCreateVReg(const Value &Val) {
+  Register getOrCreateVReg(const Value &Val) {
     auto Regs = getOrCreateVRegs(Val);
     if (Regs.empty())
       return 0;
@@ -534,6 +595,14 @@
     return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
   }
 
+  /// Return branch probability calculated by BranchProbabilityInfo for IR
+  /// blocks.
+  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
+                                       const MachineBasicBlock *Dst) const;
+
+  void addSuccessorWithProb(MachineBasicBlock *Src, MachineBasicBlock *Dst,
+                            BranchProbability Prob);
+
 public:
   // Ctor, nothing fancy.
   IRTranslator();
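
As the getOrCreateVRegs comment above says, aggregates are flattened into one
vreg per leaf type. A sketch (ScalarVal of IR type i32, StructVal of type
{i64, float}, both placeholders):

  Register R = getOrCreateVReg(ScalarVal);                // exactly one vreg
  ArrayRef<Register> Parts = getOrCreateVRegs(StructVal); // Parts.size() == 2
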
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index 87ef2f8..e010180 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -437,15 +437,15 @@
 
       unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
       if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
-          MMO->getSize() * 8 != Size) {
+          MMO->getSizeInBits() != Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
-                 MMO->getSize() * 8 >= Size) {
+                 MMO->getSizeInBits() >= Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
-                 MMO->getSize() * 8 <= Size)
+                 MMO->getSizeInBits() <= Size)
         if (handleReject() == RejectAndGiveUp)
           return false;
 
@@ -478,17 +478,19 @@
                              << InsnID << "]->getOperand(" << OpIdx
                              << "), SizeInBits=" << SizeInBits << ")\n");
       assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+      const LLT Ty = MRI.getType(MO.getReg());
+
       // iPTR must be looked up in the target.
       if (SizeInBits == 0) {
         MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
-        SizeInBits = MF->getDataLayout().getPointerSizeInBits(0);
+        const unsigned AddrSpace = Ty.getAddressSpace();
+        SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
       }
 
       assert(SizeInBits != 0 && "Pointer size must be known");
 
-      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
       if (MO.isReg()) {
-        const LLT &Ty = MRI.getType(MO.getReg());
         if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
           if (handleReject() == RejectAndGiveUp)
             return false;
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index e7680e1..a22778b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -28,6 +28,18 @@
   MachineRegisterInfo &MRI;
   const LegalizerInfo &LI;
 
+  static bool isArtifactCast(unsigned Opc) {
+    switch (Opc) {
+    case TargetOpcode::G_TRUNC:
+    case TargetOpcode::G_SEXT:
+    case TargetOpcode::G_ZEXT:
+    case TargetOpcode::G_ANYEXT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
 public:
   LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     const LegalizerInfo &LI)
@@ -39,11 +51,11 @@
       return false;
 
     Builder.setInstr(MI);
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
     // aext(trunc x) - > aext/copy/trunc x
-    unsigned TruncSrc;
+    Register TruncSrc;
     if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
       LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
       Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
@@ -52,7 +64,7 @@
     }
 
     // aext([asz]ext x) -> [asz]ext x
-    unsigned ExtSrc;
+    Register ExtSrc;
     MachineInstr *ExtMI;
     if (mi_match(SrcReg, MRI,
                  m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
@@ -62,6 +74,20 @@
       markInstAndDefDead(MI, *ExtMI, DeadInsts);
       return true;
     }
+
+    // Try to fold aext(g_constant) when the larger constant type is legal.
+    // Can't use MIPattern because we don't have a specific constant in mind.
+    auto *SrcMI = MRI.getVRegDef(SrcReg);
+    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+      const LLT &DstTy = MRI.getType(DstReg);
+      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+        auto &CstVal = SrcMI->getOperand(1);
+        Builder.buildConstant(
+            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
+        markInstAndDefDead(MI, *SrcMI, DeadInsts);
+        return true;
+      }
+    }
     return tryFoldImplicitDef(MI, DeadInsts);
   }
 
@@ -72,11 +98,11 @@
       return false;
 
     Builder.setInstr(MI);
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
     // zext(trunc x) - > and (aext/copy/trunc x), mask
-    unsigned TruncSrc;
+    Register TruncSrc;
     if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
       LLT DstTy = MRI.getType(DstReg);
       if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
@@ -91,6 +117,20 @@
       markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
       return true;
     }
+
+    // Try to fold zext(g_constant) when the larger constant type is legal.
+    // Can't use MIPattern because we don't have a specific constant in mind.
+    auto *SrcMI = MRI.getVRegDef(SrcReg);
+    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+      const LLT &DstTy = MRI.getType(DstReg);
+      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+        auto &CstVal = SrcMI->getOperand(1);
+        Builder.buildConstant(
+            DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
+        markInstAndDefDead(MI, *SrcMI, DeadInsts);
+        return true;
+      }
+    }
     return tryFoldImplicitDef(MI, DeadInsts);
   }
 
@@ -101,11 +141,11 @@
       return false;
 
     Builder.setInstr(MI);
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+    Register DstReg = MI.getOperand(0).getReg();
+    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
 
     // sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c
-    unsigned TruncSrc;
+    Register TruncSrc;
     if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
       LLT DstTy = MRI.getType(DstReg);
       // Guess on the RHS shift amount type, which should be re-legalized if
@@ -139,7 +179,7 @@
     if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                            MI.getOperand(1).getReg(), MRI)) {
       Builder.setInstr(MI);
-      unsigned DstReg = MI.getOperand(0).getReg();
+      Register DstReg = MI.getOperand(0).getReg();
       LLT DstTy = MRI.getType(DstReg);
 
       if (Opcode == TargetOpcode::G_ANYEXT) {
@@ -180,21 +220,33 @@
       return false;
 
     unsigned NumDefs = MI.getNumOperands() - 1;
+    MachineInstr *SrcDef =
+        getDefIgnoringCopies(MI.getOperand(NumDefs).getReg(), MRI);
+    if (!SrcDef)
+      return false;
 
     LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
     LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+    MachineInstr *MergeI = SrcDef;
+    unsigned ConvertOp = 0;
 
+    // Handle intermediate conversions
+    unsigned SrcOp = SrcDef->getOpcode();
+    if (isArtifactCast(SrcOp)) {
+      ConvertOp = SrcOp;
+      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
+    }
+
+    // FIXME: Handle scalarizing concat_vectors (scalar result type with vector
+    // source)
     unsigned MergingOpcode = getMergeOpcode(OpTy, DestTy);
-    MachineInstr *MergeI =
-        getOpcodeDef(MergingOpcode, MI.getOperand(NumDefs).getReg(), MRI);
-
-    if (!MergeI)
+    if (!MergeI || MergeI->getOpcode() != MergingOpcode)
       return false;
 
     const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
 
     if (NumMergeRegs < NumDefs) {
-      if (NumDefs % NumMergeRegs != 0)
+      if (ConvertOp != 0 || NumDefs % NumMergeRegs != 0)
         return false;
 
       Builder.setInstr(MI);
@@ -207,7 +259,7 @@
 
       const unsigned NewNumDefs = NumDefs / NumMergeRegs;
       for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
-        SmallVector<unsigned, 2> DstRegs;
+        SmallVector<Register, 2> DstRegs;
         for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
              ++j, ++DefIdx)
           DstRegs.push_back(MI.getOperand(DefIdx).getReg());
@@ -216,7 +268,7 @@
       }
 
     } else if (NumMergeRegs > NumDefs) {
-      if (NumMergeRegs % NumDefs != 0)
+      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
         return false;
 
       Builder.setInstr(MI);
@@ -229,7 +281,7 @@
 
       const unsigned NumRegs = NumMergeRegs / NumDefs;
       for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
-        SmallVector<unsigned, 2> Regs;
+        SmallVector<Register, 2> Regs;
         for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
              ++j, ++Idx)
           Regs.push_back(MergeI->getOperand(Idx).getReg());
@@ -238,10 +290,22 @@
       }
 
     } else {
+      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+      if (ConvertOp) {
+        Builder.setInstr(MI);
+
+        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
+          Builder.buildInstr(ConvertOp, {MI.getOperand(Idx).getReg()},
+                             {MergeSrc});
+        }
+
+        markInstAndDefDead(MI, *MergeI, DeadInsts);
+        return true;
+      }
       // FIXME: is a COPY appropriate if the types mismatch? We know both
       // registers are allocatable by now.
-      if (MRI.getType(MI.getOperand(0).getReg()) !=
-          MRI.getType(MergeI->getOperand(1).getReg()))
+      if (DestTy != MergeSrcTy)
         return false;
 
       for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
@@ -317,7 +381,13 @@
   /// Adds instructions that are dead as a result of the combine
   /// into DeadInsts, which can include MI.
   bool tryCombineInstruction(MachineInstr &MI,
-                             SmallVectorImpl<MachineInstr *> &DeadInsts) {
+                             SmallVectorImpl<MachineInstr *> &DeadInsts,
+                             GISelObserverWrapper &WrapperObserver) {
+    // This might be a recursive call, and we might have DeadInsts already
+    // populated. To avoid bad things happening later with multiple vreg defs
+    // etc., process the dead instructions now, if there are any.
+    if (!DeadInsts.empty())
+      deleteMarkedDeadInsts(DeadInsts, WrapperObserver);
     switch (MI.getOpcode()) {
     default:
       return false;
@@ -334,7 +404,7 @@
     case TargetOpcode::G_TRUNC: {
       bool Changed = false;
       for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg()))
-        Changed |= tryCombineInstruction(Use, DeadInsts);
+        Changed |= tryCombineInstruction(Use, DeadInsts, WrapperObserver);
       return Changed;
     }
     }
@@ -383,8 +453,10 @@
       MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
       if (MRI.hasOneUse(PrevRegSrc)) {
         if (TmpDef != &DefMI) {
-          assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
-                 "Expecting copy here");
+          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
+                  isArtifactCast(TmpDef->getOpcode())) &&
+                 "Expecting copy or artifact cast here");
+
           DeadInsts.push_back(TmpDef);
         }
       } else
@@ -395,6 +467,22 @@
       DeadInsts.push_back(&DefMI);
   }
 
+  /// Erase the dead instructions in the list and call the observer hooks.
+  /// Normally the Legalizer will deal with erasing instructions that have been
+  /// marked dead. However, for the trunc(ext(x)) cases we can end up trying
+  /// to process instructions which have already been marked dead, which can
+  /// break the MIR by introducing multiple vreg defs. For those cases, allow
+  /// the combines to explicitly delete the instructions before we run into
+  /// trouble.
+  void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
+                             GISelObserverWrapper &WrapperObserver) {
+    for (auto *DeadMI : DeadInsts) {
+      LLVM_DEBUG(dbgs() << *DeadMI << "Is dead, eagerly deleting\n");
+      WrapperObserver.erasingInstr(*DeadMI);
+      DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
+    }
+    DeadInsts.clear();
+  }
+
   /// Checks if the target legalizer info has specified anything about the
   /// instruction, or if unsupported.
   bool isInstUnsupported(const LegalityQuery &Query) const {
@@ -403,6 +491,10 @@
     return Step.Action == Unsupported || Step.Action == NotFound;
   }
 
+  bool isInstLegal(const LegalityQuery &Query) const {
+    return LI.getAction(Query).Action == LegalizeActions::Legal;
+  }
+
   bool isConstantUnsupported(LLT Ty) const {
     if (!Ty.isVector())
       return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});
@@ -414,8 +506,8 @@
 
   /// Looks through copy instructions and returns the actual
   /// source register.
-  unsigned lookThroughCopyInstrs(unsigned Reg) {
-    unsigned TmpReg;
+  unsigned lookThroughCopyInstrs(Register Reg) {
+    Register TmpReg;
     while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
       if (MRI.getType(TmpReg).isValid())
         Reg = TmpReg;
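
The new G_CONSTANT folds above turn, for example,
  %c:_(s8) = G_CONSTANT i8 255
  %x:_(s32) = G_ZEXT %c(s8)
into a single %x:_(s32) = G_CONSTANT i32 255, provided the wider constant is
legal. A condensed sketch of that path (DstReg, DstTy, and CstVal as in the
hunks above):

  if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}}))
    // Widen the APInt at compile time; the zext fold widens with .zext(),
    // while the aext fold above uses .sext().
    Builder.buildConstant(
        DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
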
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
index 0181650..13cf3f7 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -54,6 +54,11 @@
         MachineFunctionProperties::Property::Legalized);
   }
 
+  MachineFunctionProperties getClearedProperties() const override {
+    return MachineFunctionProperties()
+      .set(MachineFunctionProperties::Property::NoPHIs);
+  }
+
   bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
                        const TargetInstrInfo &TII);
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 136356c..e8cb65f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -124,6 +124,11 @@
   /// result elements, and replacing the vreg of the operand in place.
   void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);
 
+  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+  /// Use by producing a vector with undefined high elements, extracting the
+  /// original vector type, and replacing the vreg of the operand in place.
+  void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);
+
   LegalizeResult
   widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
   LegalizeResult
@@ -136,14 +141,14 @@
   /// Helper function to split a wide generic register into bitwise blocks with
   /// the given Type (which implies the number of blocks needed). The generic
   /// registers created are appended to \p VRegs, starting at bit 0 of \p Reg.
-  void extractParts(unsigned Reg, LLT Ty, int NumParts,
-                    SmallVectorImpl<unsigned> &VRegs);
+  void extractParts(Register Reg, LLT Ty, int NumParts,
+                    SmallVectorImpl<Register> &VRegs);
 
   /// Version which handles irregular splits.
-  bool extractParts(unsigned Reg, LLT RegTy, LLT MainTy,
+  bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
                     LLT &LeftoverTy,
-                    SmallVectorImpl<unsigned> &VRegs,
-                    SmallVectorImpl<unsigned> &LeftoverVRegs);
+                    SmallVectorImpl<Register> &VRegs,
+                    SmallVectorImpl<Register> &LeftoverVRegs);
 
   /// Helper function to build a wide generic register \p DstReg of type \p
   /// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
@@ -154,9 +159,16 @@
   ///
   /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
   /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
-  void insertParts(unsigned DstReg, LLT ResultTy,
-                   LLT PartTy, ArrayRef<unsigned> PartRegs,
-                   LLT LeftoverTy = LLT(), ArrayRef<unsigned> LeftoverRegs = {});
+  void insertParts(Register DstReg, LLT ResultTy,
+                   LLT PartTy, ArrayRef<Register> PartRegs,
+                   LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
+
+  /// Perform generic multiplication of values held in multiple registers.
+  /// Generated instructions use only types NarrowTy and i1.
+  /// The destination can be the same size as, or twice the size of, the
+  /// source.
+  void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+                         ArrayRef<Register> Src1Regs,
+                         ArrayRef<Register> Src2Regs, LLT NarrowTy);
 
   LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
                                                 unsigned TypeIdx, LLT NarrowTy);
@@ -181,6 +193,12 @@
   LegalizeResult
   fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
+  LegalizeResult fewerElementsVectorPhi(MachineInstr &MI,
+                                        unsigned TypeIdx, LLT NarrowTy);
+
+  LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
+                                       LLT MoreTy);
+
   LegalizeResult
   reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
@@ -188,14 +206,21 @@
                                              LLT HalfTy, LLT ShiftAmtTy);
 
   LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
-  LegalizeResult narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
   LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
   LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
+  LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
   LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
   LegalizeResult lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
+  LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
+  LegalizeResult lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+  LegalizeResult lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+
   MachineRegisterInfo &MRI;
   const LegalizerInfo &LI;
   /// To keep track of changes made by the LegalizerHelper.
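
A sketch of how narrowScalarMul can chain these helpers, assuming an s64
G_MUL split into two s32 limbs (Src1, Src2, and DstReg are placeholders):

  SmallVector<Register, 2> Src1Parts, Src2Parts, DstParts;
  const LLT NarrowTy = LLT::scalar(32);
  extractParts(Src1, NarrowTy, 2, Src1Parts); // split each operand into limbs
  extractParts(Src2, NarrowTy, 2, Src2Parts);
  multiplyRegisters(DstParts, Src1Parts, Src2Parts, NarrowTy);
  // DstParts now holds the product limbs; a G_MERGE_VALUES of them rebuilds
  // the full-width result in DstReg.
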
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 5979173..513c98f 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -92,6 +92,7 @@
   UseLegacyRules,
 };
 } // end namespace LegalizeActions
+raw_ostream &operator<<(raw_ostream &OS, LegalizeActions::LegalizeAction Action);
 
 using LegalizeActions::LegalizeAction;
 
@@ -122,6 +123,7 @@
 
   struct MemDesc {
     uint64_t SizeInBits;
+    uint64_t AlignInBits;
     AtomicOrdering Ordering;
   };
 
@@ -164,13 +166,23 @@
     std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;
 
 namespace LegalityPredicates {
-struct TypePairAndMemSize {
+struct TypePairAndMemDesc {
   LLT Type0;
   LLT Type1;
   uint64_t MemSize;
+  uint64_t Align;
 
-  bool operator==(const TypePairAndMemSize &Other) const {
+  bool operator==(const TypePairAndMemDesc &Other) const {
     return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+           Align == Other.Align &&
+           MemSize == Other.MemSize;
+  }
+
+  /// \returns true if this memory access is legal for the access described
+  /// by \p Other (the alignment is sufficient for the size and result type).
+  bool isCompatible(const TypePairAndMemDesc &Other) const {
+    return Type0 == Other.Type0 && Type1 == Other.Type1 &&
+           Align >= Other.Align &&
            MemSize == Other.MemSize;
   }
 };
@@ -199,9 +211,9 @@
               std::initializer_list<std::pair<LLT, LLT>> TypesInit);
 /// True iff the given types for the given pair of type indexes is one of the
 /// specified type pairs.
-LegalityPredicate typePairAndMemSizeInSet(
+LegalityPredicate typePairAndMemDescInSet(
     unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
-    std::initializer_list<TypePairAndMemSize> TypesAndMemSizeInit);
+    std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit);
 /// True iff the specified type index is a scalar.
 LegalityPredicate isScalar(unsigned TypeIdx);
 /// True iff the specified type index is a vector.
@@ -455,13 +467,13 @@
     return actionFor(LegalizeAction::Legal, Types);
   }
   /// The instruction is legal when type indexes 0 and 1 along with the memory
-  /// size is any type and size tuple in the given list.
-  LegalizeRuleSet &legalForTypesWithMemSize(
-      std::initializer_list<LegalityPredicates::TypePairAndMemSize>
-          TypesAndMemSize) {
+  /// size and minimum alignment match any tuple in the given list.
+  LegalizeRuleSet &legalForTypesWithMemDesc(
+      std::initializer_list<LegalityPredicates::TypePairAndMemDesc>
+          TypesAndMemDesc) {
     return actionIf(LegalizeAction::Legal,
-                    LegalityPredicates::typePairAndMemSizeInSet(
-                        typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemSize));
+                    LegalityPredicates::typePairAndMemDescInSet(
+                        typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemDesc));
   }
   /// The instruction is legal when type indexes 0 and 1 are both in the given
   /// list. That is, the type pair is in the cartesian product of the list.
@@ -474,6 +486,20 @@
                                             std::initializer_list<LLT> Types1) {
     return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
   }
+  /// The instruction is legal when type indexes 0, 1, and 2 are all in their
+  /// respective lists.
+  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
+                                            std::initializer_list<LLT> Types1,
+                                            std::initializer_list<LLT> Types2) {
+    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1,
+                                     Types2);
+  }
+
+  LegalizeRuleSet &alwaysLegal() {
+    using namespace LegalizeMutations;
+    markAllTypeIdxsAsCovered();
+    return actionIf(LegalizeAction::Legal, always);
+  }
 
   /// The instruction is lowered.
   LegalizeRuleSet &lower() {
@@ -624,6 +650,13 @@
   LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
     return actionFor(LegalizeAction::Custom, Types);
   }
+
+  /// The instruction is custom when type indexes 0 and 1 are any type pair in
+  /// the given list.
+  LegalizeRuleSet &customFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
+    return actionFor(LegalizeAction::Custom, Types);
+  }
+
   LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
     return actionForCartesianProduct(LegalizeAction::Custom, Types);
   }
@@ -670,7 +703,7 @@
                     LegalizeMutations::scalarize(TypeIdx));
   }
 
-  /// Ensure the scalar is at least as wide as Ty.
+  /// Ensure the scalar or element is at least as wide as Ty.
   LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT &Ty) {
     using namespace LegalityPredicates;
     using namespace LegalizeMutations;
@@ -679,6 +712,17 @@
                     changeElementTo(typeIdx(TypeIdx), Ty));
   }
 
+  /// Ensure the scalar or element is at least as wide as Ty, but only if the
+  /// given predicate holds.
+  LegalizeRuleSet &minScalarOrEltIf(LegalityPredicate Predicate,
+                                    unsigned TypeIdx, const LLT &Ty) {
+    using namespace LegalityPredicates;
+    using namespace LegalizeMutations;
+    return actionIf(LegalizeAction::WidenScalar,
+                    all(Predicate, scalarOrEltNarrowerThan(
+                                       TypeIdx, Ty.getScalarSizeInBits())),
+                    changeElementTo(typeIdx(TypeIdx), Ty));
+  }
+
   /// Ensure the scalar is at least as wide as Ty.
   LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT &Ty) {
     using namespace LegalityPredicates;
@@ -749,6 +793,22 @@
         });
   }
 
+  /// Conditionally widen the scalar or elt to match the size of another.
+  LegalizeRuleSet &minScalarEltSameAsIf(LegalityPredicate Predicate,
+                                   unsigned TypeIdx, unsigned LargeTypeIdx) {
+    typeIdx(TypeIdx);
+    return widenScalarIf(
+        [=](const LegalityQuery &Query) {
+          return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
+                     Query.Types[TypeIdx].getScalarSizeInBits() &&
+                 Predicate(Query);
+        },
+        [=](const LegalityQuery &Query) {
+          LLT T = Query.Types[LargeTypeIdx];
+          return std::make_pair(TypeIdx, T);
+        });
+  }
+
   /// Add more elements to the vector to reach the next power of two.
   /// No effect if the type is not a vector or the element count is a power of
   /// two.
@@ -1043,12 +1103,22 @@
   LegalizeActionStep getAction(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) const;
 
+  bool isLegal(const LegalityQuery &Query) const {
+    return getAction(Query).Action == LegalizeAction::Legal;
+  }
   bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
+  bool isLegalOrCustom(const MachineInstr &MI,
+                       const MachineRegisterInfo &MRI) const;
 
   virtual bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI,
                               MachineIRBuilder &MIRBuilder,
                               GISelChangeObserver &Observer) const;
 
+  /// Return true if \p MI is either legal or has been legalized, and false
+  /// if not legal.
+  virtual bool legalizeIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                 MachineIRBuilder &MIRBuilder) const;
+
 private:
   /// Determine what action should be taken to legalize the given generic
   /// instruction opcode, type-index and type. Requires computeTables to have
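
With alignment folded into the memory description, a load rule now states the
type pair, access size, and minimum alignment together. A sketch of target
rules (the s32/p0 pairing and the 32-bit numbers are illustrative only):

  getActionDefinitionsBuilder(TargetOpcode::G_LOAD)
      .legalForTypesWithMemDesc(
          {{LLT::scalar(32), LLT::pointer(0, 64),
            /*MemSize=*/32, /*AlignInBits=*/32}});
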
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
index cfc7c35..06de580 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -21,12 +21,14 @@
 #ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
 #define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
 
+#include "llvm/ADT/SetVector.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 
 namespace llvm {
 // Forward declarations.
 class MachineRegisterInfo;
+class TargetTransformInfo;
 
 /// This pass implements the localization mechanism described at the
 /// top of this file. One specificity of the implementation is that
@@ -43,9 +45,11 @@
   /// MRI contains all the register class/bank information that this
   /// pass uses and updates.
   MachineRegisterInfo *MRI;
+  /// TTI used for getting remat costs for instructions.
+  TargetTransformInfo *TTI;
 
   /// Check whether or not \p MI needs to be moved close to its uses.
-  static bool shouldLocalize(const MachineInstr &MI);
+  bool shouldLocalize(const MachineInstr &MI);
 
   /// Check if \p MOUse is used in the same basic block as \p Def.
   /// If the use is in the same block, we say it is local.
@@ -57,6 +61,15 @@
   /// Initialize the field members using \p MF.
   void init(MachineFunction &MF);
 
+  typedef SmallSetVector<MachineInstr *, 32> LocalizedSetVecT;
+
+  /// Do inter-block localization from the entry block.
+  bool localizeInterBlock(MachineFunction &MF,
+                          LocalizedSetVecT &LocalizedInstrs);
+
+  /// Do intra-block localization of already localized instructions.
+  bool localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs);
+
 public:
   Localizer();
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 1f4ee8a..13eddd9 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -1,4 +1,4 @@
-//== ----- llvm/CodeGen/GlobalISel/MIPatternMatch.h --------------------- == //
+//==------ llvm/CodeGen/GlobalISel/MIPatternMatch.h -------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -30,8 +30,7 @@
   SubPatternT SubPat;
   OneUse_match(const SubPatternT &SP) : SubPat(SP) {}
 
-  template <typename OpTy>
-  bool match(const MachineRegisterInfo &MRI, unsigned Reg) {
+  bool match(MachineRegisterInfo &MRI, unsigned Reg) {
     return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
   }
 };
@@ -161,7 +160,7 @@
   }
 };
 
-inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
+inline bind_ty<Register> m_Reg(Register &R) { return R; }
 inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
 inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
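
With m_Reg now binding a Register, matcher clients capture strongly typed registers. A short usage sketch, assuming Root and MRI are in scope; m_GAdd and m_ICst are the existing matchers from this header:

    using namespace llvm;
    using namespace llvm::MIPatternMatch;

    Register Src; // previously this had to be a raw unsigned
    int64_t Cst;
    // Match Root against G_ADD(Src, G_CONSTANT Cst).
    if (mi_match(Root, MRI, m_GAdd(m_Reg(Src), m_ICst(Cst)))) {
      // ... fold using Src and Cst ...
    }
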
 
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 6e1ff7c..da3c478 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -59,13 +59,14 @@
 class DstOp {
   union {
     LLT LLTTy;
-    unsigned Reg;
+    Register Reg;
     const TargetRegisterClass *RC;
   };
 
 public:
   enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
   DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
+  DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
   DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
   DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
   DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
@@ -96,7 +97,7 @@
     llvm_unreachable("Unrecognised DstOp::DstType enum");
   }
 
-  unsigned getReg() const {
+  Register getReg() const {
     assert(Ty == DstType::Ty_Reg && "Not a register");
     return Reg;
   }
@@ -119,13 +120,13 @@
 class SrcOp {
   union {
     MachineInstrBuilder SrcMIB;
-    unsigned Reg;
+    Register Reg;
     CmpInst::Predicate Pred;
   };
 
 public:
   enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
-  SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
+  SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
   SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
@@ -156,7 +157,7 @@
     llvm_unreachable("Unrecognised SrcOp::SrcType enum");
   }
 
-  unsigned getReg() const {
+  Register getReg() const {
     switch (Ty) {
     case SrcType::Ty_Predicate:
       llvm_unreachable("Not a register operand");
@@ -237,6 +238,10 @@
     return *State.MF;
   }
 
+  const DataLayout &getDataLayout() const {
+    return getMF().getFunction().getParent()->getDataLayout();
+  }
+
   /// Getter for DebugLoc
   const DebugLoc &getDL() { return State.DL; }
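
The getDataLayout accessor collapses the getMF().getFunction().getParent()->getDataLayout() chain that callers previously spelled out. For example, assuming an initialized builder named MIRBuilder:

    const DataLayout &DL = MIRBuilder.getDataLayout();
    unsigned PtrSize = DL.getPointerSizeInBits(0); // address space 0
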
 
@@ -317,13 +322,13 @@
 
   /// Build and insert a DBG_VALUE instruction expressing the fact that the
   /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
-  MachineInstrBuilder buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
+  MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                           const MDNode *Expr);
 
   /// Build and insert a DBG_VALUE instruction expressing the fact that the
   /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
   /// Expr).
-  MachineInstrBuilder buildIndirectDbgValue(unsigned Reg,
+  MachineInstrBuilder buildIndirectDbgValue(Register Reg,
                                             const MDNode *Variable,
                                             const MDNode *Expr);
 
@@ -352,7 +357,7 @@
   /// \pre \p Res must be a generic virtual register with pointer type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
+  MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx);
 
   /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
   ///
@@ -364,8 +369,7 @@
   ///      in the same address space as \p GV.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
-
+  MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV);
 
   /// Build and insert \p Res = G_GEP \p Op0, \p Op1
   ///
@@ -378,8 +382,8 @@
   /// \pre \p Op1 must be a generic virtual register with scalar type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
-                               unsigned Op1);
+  MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0,
+                               const SrcOp &Op1);
 
   /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
   ///
@@ -397,7 +401,7 @@
   ///       type as \p Op0 or \p Op0 itself.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+  Optional<MachineInstrBuilder> materializeGEP(Register &Res, Register Op0,
                                                const LLT &ValueTy,
                                                uint64_t Value);
 
@@ -414,9 +418,24 @@
   ///      be cleared in \p Op0.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
+  MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
                                    uint32_t NumBits);
 
+  /// Build and insert \p Res, \p CarryOut = G_UADDO \p Op0, \p Op1
+  ///
+  /// G_UADDO sets \p Res to \p Op0 + \p Op1 (truncated to the bit width) and
+  /// sets \p CarryOut to 1 if the result overflowed in unsigned arithmetic.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers with the
+  /// same scalar type.
+  /// \pre \p CarryOut must be a generic virtual register with scalar type
+  /// (typically s1).
+  ///
+  /// \return The newly created instruction.
+  MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut,
+                                 const SrcOp &Op0, const SrcOp &Op1);
+
   /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
   /// \p Op1, \p CarryIn
   ///
@@ -465,6 +484,16 @@
   /// \return The newly created instruction.
   MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);
 
+  /// Build and insert a G_PTRTOINT instruction.
+  MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_PTRTOINT, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Dst = G_BITCAST \p Src
+  MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src) {
+    return buildInstr(TargetOpcode::G_BITCAST, {Dst}, {Src});
+  }
+
   /// \return The opcode of the extension the target wants to use for boolean
   /// values.
   unsigned getBoolExtOp(bool IsVec, bool IsFP) const;
@@ -554,7 +583,7 @@
   ///      depend on bit 0 (for now).
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &Dest);
+  MachineInstrBuilder buildBrCond(Register Tst, MachineBasicBlock &Dest);
 
   /// Build and insert G_BRINDIRECT \p Tgt
   ///
@@ -564,7 +593,21 @@
   /// \pre \p Tgt must be a generic virtual register with pointer type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildBrIndirect(unsigned Tgt);
+  MachineInstrBuilder buildBrIndirect(Register Tgt);
+
+  /// Build and insert G_BRJT \p TablePtr, \p JTI, \p IndexReg
+  ///
+  /// G_BRJT is a jump table branch using a table base pointer \p TablePtr,
+  /// jump table index \p JTI, and index \p IndexReg.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p TablePtr must be a generic virtual register with pointer type.
+  /// \pre \p JTI must be a jump table index.
+  /// \pre \p IndexReg must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI,
+                                Register IndexReg);
 
   /// Build and insert \p Res = G_CONSTANT \p Val
   ///
@@ -603,6 +646,7 @@
                                              const ConstantFP &Val);
 
   MachineInstrBuilder buildFConstant(const DstOp &Res, double Val);
+  MachineInstrBuilder buildFConstant(const DstOp &Res, const APFloat &Val);
 
   /// Build and insert \p Res = COPY Op
   ///
@@ -622,7 +666,7 @@
   /// \pre \p Addr must be a generic virtual register with pointer type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
+  MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
                                 MachineMemOperand &MMO);
 
   /// Build and insert `Res = <opcode> Addr, MMO`.
@@ -634,8 +678,8 @@
   /// \pre \p Addr must be a generic virtual register with pointer type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res,
-                                     unsigned Addr, MachineMemOperand &MMO);
+  MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res,
+                                     const SrcOp &Addr, MachineMemOperand &MMO);
 
   /// Build and insert `G_STORE Val, Addr, MMO`.
   ///
@@ -646,7 +690,7 @@
   /// \pre \p Addr must be a generic virtual register with pointer type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
+  MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
                                  MachineMemOperand &MMO);
 
   /// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
@@ -655,7 +699,7 @@
   /// \pre \p Res and \p Src must be generic virtual registers.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildExtract(unsigned Res, unsigned Src, uint64_t Index);
+  MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src,
+                                   uint64_t Index);
 
   /// Build and insert \p Res = IMPLICIT_DEF.
   MachineInstrBuilder buildUndef(const DstOp &Res);
@@ -673,7 +717,7 @@
   /// \pre The bits defined by each Op (derived from index and scalar size) must
   ///      not overlap.
   /// \pre \p Indices must be in ascending order of bit position.
-  void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+  void buildSequence(Register Res, ArrayRef<Register> Ops,
                      ArrayRef<uint64_t> Indices);
 
   /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
@@ -687,7 +731,7 @@
   /// \pre The type of all \p Ops registers must be identical.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<unsigned> Ops);
+  MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
 
   /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
   ///
@@ -700,7 +744,10 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
-  MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, const SrcOp &Op);
+  MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);
+
+  /// Build and insert an unmerge of \p Res-sized pieces to cover \p Op.
+  MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
 
   /// Build and insert \p Res = G_BUILD_VECTOR \p Op0, ...
   ///
@@ -712,7 +759,7 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildBuildVector(const DstOp &Res,
-                                       ArrayRef<unsigned> Ops);
+                                       ArrayRef<Register> Ops);
 
   /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
   /// the number of elements
@@ -733,7 +780,7 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
-                                            ArrayRef<unsigned> Ops);
+                                            ArrayRef<Register> Ops);
 
   /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
   ///
@@ -747,10 +794,10 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildConcatVectors(const DstOp &Res,
-                                         ArrayRef<unsigned> Ops);
+                                         ArrayRef<Register> Ops);
 
-  MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
-                                  unsigned Op, unsigned Index);
+  MachineInstrBuilder buildInsert(Register Res, Register Src,
+                                  Register Op, unsigned Index);
 
   /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
   /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
@@ -762,7 +809,9 @@
   /// \pre setBasicBlock or setMI must have been called.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
+                                     bool HasSideEffects);
+  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Res,
                                      bool HasSideEffects);
 
   /// Build and insert \p Res = G_FPTRUNC \p Op
@@ -877,8 +926,8 @@
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder
-  buildAtomicCmpXchgWithSuccess(unsigned OldValRes, unsigned SuccessRes,
-                                unsigned Addr, unsigned CmpVal, unsigned NewVal,
+  buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes,
+                                Register Addr, Register CmpVal, Register NewVal,
                                 MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
@@ -895,8 +944,8 @@
   ///      registers of the same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
-                                         unsigned CmpVal, unsigned NewVal,
+  MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr,
+                                         Register CmpVal, Register NewVal,
                                          MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
@@ -912,8 +961,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, unsigned OldValRes,
-                                     unsigned Addr, unsigned Val,
+  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, Register OldValRes,
+                                     Register Addr, Register Val,
                                      MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
@@ -928,8 +977,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr,
+                                         Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO`.
   ///
@@ -943,8 +992,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO`.
   ///
@@ -958,8 +1007,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO`.
   ///
@@ -973,8 +1022,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO`.
   ///
@@ -989,8 +1038,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr,
+                                         Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO`.
   ///
@@ -1004,8 +1053,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWOr(unsigned OldValRes, unsigned Addr,
-                                       unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr,
+                                       Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO`.
   ///
@@ -1019,8 +1068,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
-                                        unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO`.
   ///
@@ -1035,8 +1084,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
-                                        unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO`.
   ///
@@ -1051,8 +1100,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
-                                        unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr,
+                                        Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO`.
   ///
@@ -1067,8 +1116,8 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr,
+                                         Register Val, MachineMemOperand &MMO);
 
   /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO`.
   ///
@@ -1083,8 +1132,11 @@
   ///      same type.
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
-                                         unsigned Val, MachineMemOperand &MMO);
+  MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr,
+                                         Register Val, MachineMemOperand &MMO);
+
+  /// Build and insert `G_FENCE Ordering, Scope`.
+  MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);
 
   /// Build and insert \p Res = G_BLOCK_ADDR \p BA
   ///
@@ -1094,7 +1146,7 @@
   /// \pre \p Res must be a generic virtual register of a pointer type.
   ///
   /// \return The newly created instruction.
-  MachineInstrBuilder buildBlockAddress(unsigned Res, const BlockAddress *BA);
+  MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA);
 
   /// Build and insert \p Res = G_ADD \p Op0, \p Op1
   ///
@@ -1207,6 +1259,131 @@
     return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1});
   }
 
+  /// Build and insert \p Res = G_XOR \p Op0, \p Op1
+  MachineInstrBuilder buildXor(const DstOp &Dst, const SrcOp &Src0,
+                               const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert a bitwise not:
+  /// \p NegOne = G_CONSTANT -1
+  /// \p Res = G_XOR \p Op0, NegOne
+  MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0) {
+    auto NegOne = buildConstant(Dst.getLLTTy(*getMRI()), -1);
+    return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, NegOne});
+  }
+
+  /// Build and insert \p Res = G_CTPOP \p Src0
+  MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_CTPOP, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_CTLZ \p Src0
+  MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_CTLZ, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_CTLZ_ZERO_UNDEF \p Src0
+  MachineInstrBuilder buildCTLZ_ZERO_UNDEF(const DstOp &Dst,
+                                           const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_CTTZ \p Src0
+  MachineInstrBuilder buildCTTZ(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_CTTZ, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_CTTZ_ZERO_UNDEF \p Src0
+  MachineInstrBuilder buildCTTZ_ZERO_UNDEF(const DstOp &Dst,
+                                           const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_FADD \p Op0, \p Op1
+  MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
+  MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
+  MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
+                               const SrcOp &Src1, const SrcOp &Src2) {
+    return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2});
+  }
+
+  /// Build and insert \p Res = G_FNEG \p Op0
+  MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_FABS \p Op0
+  MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
+  MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
+                                     const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_FCOPYSIGN, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_UITOFP \p Src0
+  MachineInstrBuilder buildUITOFP(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_UITOFP, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_SITOFP \p Src0
+  MachineInstrBuilder buildSITOFP(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_SITOFP, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_FPTOUI \p Src0
+  MachineInstrBuilder buildFPTOUI(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_FPTOUI, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_FPTOSI \p Src0
+  MachineInstrBuilder buildFPTOSI(const DstOp &Dst, const SrcOp &Src0) {
+    return buildInstr(TargetOpcode::G_FPTOSI, {Dst}, {Src0});
+  }
+
+  /// Build and insert \p Res = G_SMIN \p Op0, \p Op1
+  MachineInstrBuilder buildSMin(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_SMIN, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_SMAX \p Op0, \p Op1
+  MachineInstrBuilder buildSMax(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_SMAX, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_UMIN \p Op0, \p Op1
+  MachineInstrBuilder buildUMin(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_UMIN, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_UMAX \p Op0, \p Op1
+  MachineInstrBuilder buildUMax(const DstOp &Dst, const SrcOp &Src0,
+                                const SrcOp &Src1) {
+    return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
+  }
+
+  /// Build and insert \p Res = G_JUMP_TABLE \p JTI
+  ///
+  /// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
+  /// the jump table index \p JTI.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);
+
   virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                                          ArrayRef<SrcOp> SrcOps,
                                          Optional<unsigned> Flags = None);
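
Most of the new build* methods above are thin inline wrappers over buildInstr, so common sequences no longer need raw opcodes. A hedged sketch, assuming a builder B positioned at an insertion point and an s32 register Src:

    LLT S32 = LLT::scalar(32);
    auto Not = B.buildNot(S32, Src);   // expands to G_XOR Src, G_CONSTANT -1
    auto Pop = B.buildCTPOP(S32, Not); // population count of the inverted bits
    // Clamp the result with one of the new min/max helpers.
    B.buildUMin(S32, Pop, B.buildConstant(S32, 16));
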
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index c31ca5c..d9d076b 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -523,7 +523,7 @@
   /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
   /// register bank.  I.e., no repairing is necessary to have the
   /// assignment match.
-  bool assignmentMatch(unsigned Reg,
+  bool assignmentMatch(Register Reg,
                        const RegisterBankInfo::ValueMapping &ValMapping,
                        bool &OnlyAssign) const;
 
@@ -562,7 +562,7 @@
   bool repairReg(MachineOperand &MO,
                  const RegisterBankInfo::ValueMapping &ValMapping,
                  RegBankSelect::RepairingPlacement &RepairPt,
-                 const iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+                 const iterator_range<SmallVectorImpl<Register>::const_iterator>
                      &NewVRegs);
 
   /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
@@ -633,6 +633,11 @@
         MachineFunctionProperties::Property::RegBankSelected);
   }
 
+  MachineFunctionProperties getClearedProperties() const override {
+    return MachineFunctionProperties()
+      .set(MachineFunctionProperties::Property::NoPHIs);
+  }
+
   /// Walk through \p MF and assign a register bank to every virtual register
   /// that are still mapped to nothing.
   /// The target needs to provide a RegisterBankInfo and in particular
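
getClearedProperties is the counterpart of getSetProperties: it names MachineFunction properties that may no longer hold once the pass has run, so the pass manager drops NoPHIs after RegBankSelect. A sketch of the same idiom in a hypothetical pass:

    // Hypothetical pass that may reintroduce PHIs, declaring that fact so
    // the verifier stops assuming the NoPHIs property afterwards.
    class MyMIRPass : public MachineFunctionPass {
    public:
      static char ID;
      MyMIRPass() : MachineFunctionPass(ID) {}
      bool runOnMachineFunction(MachineFunction &MF) override { return false; }
      MachineFunctionProperties getClearedProperties() const override {
        return MachineFunctionProperties().set(
            MachineFunctionProperties::Property::NoPHIs);
      }
    };
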
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 37a63d1..e84b1c3 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/Hashing.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cassert>
 #include <initializer_list>
@@ -193,7 +194,7 @@
     unsigned Cost = 0;
 
     /// Mapping of all the operands.
-    const ValueMapping *OperandsMapping;
+    const ValueMapping *OperandsMapping = nullptr;
 
     /// Number of operands.
     unsigned NumOperands = 0;
@@ -210,15 +211,11 @@
     /// The rationale is that it is more efficient for the optimizers
     /// to be able to assume that the mapping of the ith operand is
     /// at the index i.
-    ///
-    /// \pre ID != InvalidMappingID
     InstructionMapping(unsigned ID, unsigned Cost,
                        const ValueMapping *OperandsMapping,
                        unsigned NumOperands)
         : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
           NumOperands(NumOperands) {
-      assert(getID() != InvalidMappingID &&
-             "Use the default constructor for invalid mapping");
     }
 
     /// Default constructor.
@@ -285,7 +282,7 @@
     SmallVector<int, 8> OpToNewVRegIdx;
 
     /// Hold the registers that will be used to map MI with InstrMapping.
-    SmallVector<unsigned, 8> NewVRegs;
+    SmallVector<Register, 8> NewVRegs;
 
     /// Current MachineRegisterInfo, used to create new virtual registers.
     MachineRegisterInfo &MRI;
@@ -306,15 +303,15 @@
     /// \return The iterator range for the space created.
     ///
     /// \pre getMI().getOperand(OpIdx).isReg()
-    iterator_range<SmallVectorImpl<unsigned>::iterator>
+    iterator_range<SmallVectorImpl<Register>::iterator>
     getVRegsMem(unsigned OpIdx);
 
     /// Get the end iterator for a range starting at \p StartIdx and
     /// spanning \p NumVal in NewVRegs.
     /// \pre StartIdx + NumVal <= NewVRegs.size()
-    SmallVectorImpl<unsigned>::const_iterator
+    SmallVectorImpl<Register>::const_iterator
     getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
-    SmallVectorImpl<unsigned>::iterator getNewVRegsEnd(unsigned StartIdx,
+    SmallVectorImpl<Register>::iterator getNewVRegsEnd(unsigned StartIdx,
                                                        unsigned NumVal);
 
   public:
@@ -360,7 +357,7 @@
     ///
     /// \post the \p PartialMapIdx-th register of the value mapping of the \p
     /// OpIdx-th operand has been set.
-    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, unsigned NewVReg);
+    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, Register NewVReg);
 
     /// Get all the virtual registers required to map the \p OpIdx-th operand of
     /// the instruction.
@@ -374,7 +371,7 @@
     ///
     /// \pre getMI().getOperand(OpIdx).isReg()
     /// \pre ForDebug || All partial mappings have been assigned a register
-    iterator_range<SmallVectorImpl<unsigned>::const_iterator>
+    iterator_range<SmallVectorImpl<Register>::const_iterator>
     getVRegs(unsigned OpIdx, bool ForDebug = false) const;
 
     /// Print this operands mapper on dbgs() stream.
@@ -438,7 +435,7 @@
   /// Get the MinimalPhysRegClass for Reg.
   /// \pre Reg is a physical register.
   const TargetRegisterClass &
-  getMinimalPhysRegClass(unsigned Reg, const TargetRegisterInfo &TRI) const;
+  getMinimalPhysRegClass(Register Reg, const TargetRegisterInfo &TRI) const;
 
   /// Try to get the mapping of \p MI.
   /// See getInstrMapping for more details on what a mapping represents.
@@ -583,7 +580,7 @@
   /// or a register bank, then this returns nullptr.
   ///
   /// \pre Reg != 0 (NoRegister)
-  const RegisterBank *getRegBank(unsigned Reg, const MachineRegisterInfo &MRI,
+  const RegisterBank *getRegBank(Register Reg, const MachineRegisterInfo &MRI,
                                  const TargetRegisterInfo &TRI) const;
 
   /// Get the total number of register banks.
@@ -621,6 +618,12 @@
     return &A != &B;
   }
 
+  /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
+  bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
+                  unsigned Size) const {
+    return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
+  }
+
   /// Get the cost of using \p ValMapping to decompose a register. This is
   /// similar to ::copyCost, except for cases where multiple copy-like
   /// operations need to be inserted. If the register is used as a source
@@ -638,7 +641,7 @@
   /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
   /// purpose, including non-select passes of GlobalISel
   static const TargetRegisterClass *
-  constrainGenericRegister(unsigned Reg, const TargetRegisterClass &RC,
+  constrainGenericRegister(Register Reg, const TargetRegisterClass &RC,
                            MachineRegisterInfo &MRI);
 
   /// Identifier used when the related instruction mapping instance
@@ -723,7 +726,7 @@
   /// virtual register.
   ///
   /// \pre \p Reg != 0 (NoRegister).
-  unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI,
+  unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
                          const TargetRegisterInfo &TRI) const;
 
   /// Check that the information held by this instance makes sense for the
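
cannotCopy wraps the copyCost sentinel in a readable predicate. A sketch of its intended use when evaluating an instruction mapping; RBI, the two banks, and Size are assumed to be in scope:

    // Reject a candidate mapping outright when the required cross-bank
    // copy cannot be emitted at all (copyCost returned UINT_MAX).
    if (RBI.cannotCopy(DstBank, SrcBank, Size))
      return false; // let the caller try another mapping
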
diff --git a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
index dd14a0c..6946aad 100644
--- a/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/linux-x64/clang/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -15,6 +15,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_UTILS_H
 
 #include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Register.h"
 
 namespace llvm {
 
@@ -36,21 +37,37 @@
 class APFloat;
 
 /// Try to constrain Reg to the specified register class. If this fails,
-/// create a new virtual register in the correct class and insert a COPY before
-/// \p InsertPt. The debug location of \p InsertPt is used for the new copy.
+/// create a new virtual register in the correct class.
 ///
 /// \return The virtual register constrained to the right register class.
 unsigned constrainRegToClass(MachineRegisterInfo &MRI,
                              const TargetInstrInfo &TII,
-                             const RegisterBankInfo &RBI,
-                             MachineInstr &InsertPt, unsigned Reg,
+                             const RegisterBankInfo &RBI, unsigned Reg,
                              const TargetRegisterClass &RegClass);
 
+/// Constrain the register operand \p OpIdx to the TargetRegisterClass passed
+/// as an argument (\p RegClass).
+/// If this fails, create a new virtual register in the correct class and
+/// insert a COPY before \p InsertPt if it is a use or after if it is a
+/// definition. The debug location of \p InsertPt is used for the new copy.
+///
+/// \return The virtual register constrained to the right register class.
+unsigned constrainOperandRegClass(const MachineFunction &MF,
+                                  const TargetRegisterInfo &TRI,
+                                  MachineRegisterInfo &MRI,
+                                  const TargetInstrInfo &TII,
+                                  const RegisterBankInfo &RBI,
+                                  MachineInstr &InsertPt,
+                                  const TargetRegisterClass &RegClass,
+                                  const MachineOperand &RegMO, unsigned OpIdx);
+
 /// Try to constrain Reg so that it is usable by argument OpIdx of the
 /// provided MCInstrDesc \p II. If this fails, create a new virtual
-/// register in the correct class and insert a COPY before \p InsertPt.
-/// This is equivalent to constrainRegToClass() with RegClass obtained from the
-/// MCInstrDesc. The debug location of \p InsertPt is used for the new copy.
+/// register in the correct class and insert a COPY before \p InsertPt
+/// if it is a use or after if it is a definition.
+/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
+/// with RegClass obtained from the MCInstrDesc. The debug location of \p
+/// InsertPt is used for the new copy.
 ///
 /// \return The virtual register constrained to the right register class.
 unsigned constrainOperandRegClass(const MachineFunction &MF,
@@ -89,17 +106,40 @@
                         const char *PassName, StringRef Msg,
                         const MachineInstr &MI);
 
+/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
+/// returns that value.
 Optional<int64_t> getConstantVRegVal(unsigned VReg,
                                      const MachineRegisterInfo &MRI);
+/// Simple struct used to hold a constant integer value and a virtual
+/// register.
+struct ValueAndVReg {
+  int64_t Value;
+  unsigned VReg;
+};
+/// If \p VReg is defined by a statically evaluable chain of
+/// instructions rooted on a G_CONSTANT (\p LookThroughInstrs == true)
+/// and that constant fits in int64_t, returns its value as well as
+/// the virtual register defined by this G_CONSTANT.
+/// When \p LookThroughInstrs == false, this function behaves like
+/// getConstantVRegVal.
+Optional<ValueAndVReg>
+getConstantVRegValWithLookThrough(unsigned VReg, const MachineRegisterInfo &MRI,
+                                  bool LookThroughInstrs = true);
 const ConstantFP* getConstantFPVRegVal(unsigned VReg,
                                        const MachineRegisterInfo &MRI);
 
 /// See if Reg is defined by a single def instruction that is
 /// Opcode. Also try to do trivial folding if it's a COPY with
 /// same types. Returns null otherwise.
-MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                            const MachineRegisterInfo &MRI);
 
+/// Find the def instruction for \p Reg, folding away any trivial copies. Note
+/// it may still return a COPY if it changes the type. May return nullptr if \p
+/// Reg is not a generic virtual register.
+MachineInstr *getDefIgnoringCopies(Register Reg,
+                                   const MachineRegisterInfo &MRI);
+
 /// Returns an APFloat from Val converted to the appropriate size.
 APFloat getAPFloatFromSize(double Val, unsigned Size);
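
Together, getConstantVRegValWithLookThrough and getDefIgnoringCopies let clients fold through trivial copies, and for the former through a statically evaluable chain rooted at a G_CONSTANT. A usage sketch, assuming Reg and MRI are in scope:

    // Fold through the chain to the underlying constant, if any.
    if (auto ValAndVReg = getConstantVRegValWithLookThrough(Reg, MRI)) {
      int64_t Imm = ValAndVReg->Value;    // the constant's value
      unsigned CstReg = ValAndVReg->VReg; // register defined by the G_CONSTANT
      (void)Imm;
      (void)CstReg;
    }

    // Or walk to the real defining instruction, skipping same-type COPYs.
    if (MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI))
      if (DefMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
        ; // handle an undef source here
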