Update prebuilt Clang to r416183b from Android.

https://android.googlesource.com/platform/prebuilts/clang/host/
linux-x86/+/06a71ddac05c22edb2d10b590e1769b3f8619bef

clang 12.0.5 (based on r416183b) from build 7284624.

Change-Id: I277a316abcf47307562d8b748b84870f31a72866
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
index 887c880..e5e24e0 100644
--- a/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
+++ b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
@@ -17,7 +17,6 @@
 #ifndef LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
 #define LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
 
-#include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
diff --git a/linux-x64/clang/include/llvm/Transforms/CFGuard.h b/linux-x64/clang/include/llvm/Transforms/CFGuard.h
new file mode 100644
index 0000000..86fcbc3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/CFGuard.h
@@ -0,0 +1,26 @@
+//===-- CFGuard.h - CFGuard Transformations ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+// Windows Control Flow Guard passes (/guard:cf).
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_CFGUARD_H
+#define LLVM_TRANSFORMS_CFGUARD_H
+
+namespace llvm {
+
+class FunctionPass;
+
+/// Insert Control Flow Guard checks on indirect function calls.
+FunctionPass *createCFGuardCheckPass();
+
+/// Insert Control Flow Guard dispatches on indirect function calls.
+FunctionPass *createCFGuardDispatchPass();
+
+} // namespace llvm
+
+#endif
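For reference, a minimal sketch (not part of this diff) of how the two factory functions might be wired into a legacy pass pipeline; the addCFGuardPasses helper and its UseDispatch flag are illustrative:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Transforms/CFGuard.h"

    void addCFGuardPasses(llvm::legacy::PassManagerBase &PM, bool UseDispatch) {
      // Dispatch mode rewrites indirect calls to go through the CFGuard
      // dispatch thunk; check mode instruments them with a guard check call.
      if (UseDispatch)
        PM.add(llvm::createCFGuardDispatchPass());
      else
        PM.add(llvm::createCFGuardCheckPass());
    }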
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines.h b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
index 9df3ec0..2043592 100644
--- a/linux-x64/clang/include/llvm/Transforms/Coroutines.h
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
@@ -20,17 +20,17 @@
 void addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder);
 
 /// Lower coroutine intrinsics that are not needed by later passes.
-Pass *createCoroEarlyPass();
+Pass *createCoroEarlyLegacyPass();
 
 /// Split up coroutines into multiple functions driving their state machines.
-Pass *createCoroSplitPass();
+Pass *createCoroSplitLegacyPass(bool ReuseFrameSlot = false);
 
 /// Analyze coroutines use sites, devirtualize resume/destroy calls and elide
 /// heap allocation for coroutine frame where possible.
-Pass *createCoroElidePass();
+Pass *createCoroElideLegacyPass();
 
 /// Lower all remaining coroutine intrinsics.
-Pass *createCoroCleanupPass();
+Pass *createCoroCleanupLegacyPass();
 
 }
 
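Callers built against the old names need a one-line rename. A hedged sketch of a legacy pipeline using the new entry points (the addCoroutinePasses helper is illustrative):

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Transforms/Coroutines.h"

    void addCoroutinePasses(llvm::legacy::PassManagerBase &PM) {
      // Same lowering order as before: early lowering, split, elide, then
      // cleanup of any remaining coroutine intrinsics.
      PM.add(llvm::createCoroEarlyLegacyPass());
      PM.add(llvm::createCoroSplitLegacyPass(/*ReuseFrameSlot=*/false));
      PM.add(llvm::createCoroElideLegacyPass());
      PM.add(llvm::createCoroCleanupLegacyPass());
    }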
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroCleanup.h b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroCleanup.h
new file mode 100644
index 0000000..7ecdc05
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroCleanup.h
@@ -0,0 +1,29 @@
+//===-- CoroCleanup.h - Lower all coroutine related intrinsics --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file declares a pass that lowers all remaining coroutine intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H
+#define LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct CoroCleanupPass : PassInfoMixin<CoroCleanupPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H
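The new-PM counterparts introduced in this and the following headers are plain PassInfoMixin passes. A sketch of scheduling them in a FunctionPassManager (the helper name is illustrative; CoroEarlyPass and CoroElidePass, declared below, follow the same shape):

    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Coroutines/CoroCleanup.h"
    #include "llvm/Transforms/Coroutines/CoroEarly.h"

    void addCoroFunctionPasses(llvm::FunctionPassManager &FPM) {
      FPM.addPass(llvm::CoroEarlyPass());   // lower early intrinsics first
      FPM.addPass(llvm::CoroCleanupPass()); // lower whatever remains
    }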
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroEarly.h b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroEarly.h
new file mode 100644
index 0000000..3f5ec2a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroEarly.h
@@ -0,0 +1,32 @@
+//===---- CoroEarly.h - Lower early coroutine intrinsics --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file provides the interface to the early coroutine intrinsic lowering
+// pass. This pass lowers coroutine intrinsics that hide the details of the
+// exact calling convention for coroutine resume and destroy functions and
+// details of the structure of the coroutine frame.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_COROEARLY_H
+#define LLVM_TRANSFORMS_COROUTINES_COROEARLY_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct CoroEarlyPass : PassInfoMixin<CoroEarlyPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_COROUTINES_COROEARLY_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroElide.h b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroElide.h
new file mode 100644
index 0000000..ff73cf2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroElide.h
@@ -0,0 +1,31 @@
+//===---- CoroElide.h - Coroutine frame allocation elision ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file declares a pass that replaces dynamic allocation of coroutine
+// frames with alloca and replaces calls to llvm.coro.resume and
+// llvm.coro.destroy with direct calls to coroutine sub-functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_COROELIDE_H
+#define LLVM_TRANSFORMS_COROUTINES_COROELIDE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct CoroElidePass : PassInfoMixin<CoroElidePass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_COROUTINES_COROELIDE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroSplit.h b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroSplit.h
new file mode 100644
index 0000000..f4eef19
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines/CoroSplit.h
@@ -0,0 +1,35 @@
+//===- CoroSplit.h - Converts a coroutine into a state machine -*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file declares the pass that builds the coroutine frame and outlines
+// the resume and destroy parts of the coroutine into separate functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
+#define LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct CoroSplitPass : PassInfoMixin<CoroSplitPass> {
+  CoroSplitPass(bool ReuseFrameSlot = false) : ReuseFrameSlot(ReuseFrameSlot) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+  static bool isRequired() { return true; }
+
+  bool ReuseFrameSlot;
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
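Unlike the sibling passes, CoroSplitPass runs on SCCs rather than functions, so it needs the module-to-CGSCC adaptor. A hedged sketch (helper name is hypothetical):

    #include "llvm/Analysis/CGSCCPassManager.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Coroutines/CoroSplit.h"

    void addCoroSplit(llvm::ModulePassManager &MPM) {
      llvm::CGSCCPassManager CGPM;
      // ReuseFrameSlot (assumed semantics: allow coroutine frame slots to be
      // reused across disjoint lifetimes) defaults to false.
      CGPM.addPass(llvm::CoroSplitPass(/*ReuseFrameSlot=*/true));
      MPM.addPass(llvm::createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
    }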
diff --git a/linux-x64/clang/include/llvm/Transforms/HelloNew/HelloWorld.h b/linux-x64/clang/include/llvm/Transforms/HelloNew/HelloWorld.h
new file mode 100644
index 0000000..6c75303
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/HelloNew/HelloWorld.h
@@ -0,0 +1,23 @@
+//===-- HelloWorld.h - Example Transformations ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_HELLONEW_HELLOWORLD_H
+#define LLVM_TRANSFORMS_HELLONEW_HELLOWORLD_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class HelloWorldPass : public PassInfoMixin<HelloWorldPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_HELLONEW_HELLOWORLD_H
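A sketch of driving this example pass with the new pass manager; all plumbing below is standard PassBuilder boilerplate rather than anything from this diff:

    #include "llvm/IR/PassManager.h"
    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Transforms/HelloNew/HelloWorld.h"

    void runHelloWorld(llvm::Module &M) {
      llvm::PassBuilder PB;
      llvm::LoopAnalysisManager LAM;
      llvm::FunctionAnalysisManager FAM;
      llvm::CGSCCAnalysisManager CGAM;
      llvm::ModuleAnalysisManager MAM;
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

      llvm::FunctionPassManager FPM;
      FPM.addPass(llvm::HelloWorldPass()); // prints each function's name
      llvm::ModulePassManager MPM;
      MPM.addPass(llvm::createModuleToFunctionPassAdaptor(std::move(FPM)));
      MPM.run(M, MAM);
    }

From the command line the same pass should be reachable as opt -passes=helloworld.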
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO.h b/linux-x64/clang/include/llvm/Transforms/IPO.h
index de0c80f..af35718 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO.h
@@ -25,13 +25,19 @@
 class ModuleSummaryIndex;
 class ModulePass;
 class Pass;
-class Function;
 class BasicBlock;
 class GlobalValue;
 class raw_ostream;
 
 //===----------------------------------------------------------------------===//
 //
+// This pass adds !annotation metadata to entries in the
+// @llvm.global.annotations global constant.
+//
+ModulePass *createAnnotation2MetadataLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
 // These functions remove symbols from functions and modules.  If OnlyDebugInfo
 // is true, only debugging information is removed from the module.
 //
@@ -84,10 +90,12 @@
 //===----------------------------------------------------------------------===//
 /// createGVExtractionPass - If deleteFn is true, this pass deletes
 /// the specified global values. Otherwise, it deletes as much of the module as
-/// possible, except for the global values specified.
+/// possible, except for the global values specified. If keepConstInit is true,
+/// the initializers of global constants are not deleted even if they are
+/// unused.
 ///
 ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
-                                   deleteFn = false);
+                                  deleteFn = false, bool keepConstInit = false);
 
 //===----------------------------------------------------------------------===//
 /// This pass performs iterative function importing from other modules.
@@ -151,10 +159,8 @@
 Pass *createArgumentPromotionPass(unsigned maxElements = 3);
 
 //===----------------------------------------------------------------------===//
-/// createIPConstantPropagationPass - This pass propagates constants from call
-/// sites into the bodies of functions.
-///
-ModulePass *createIPConstantPropagationPass();
+/// createOpenMPOptLegacyPass - OpenMP specific optimizations.
+Pass *createOpenMPOptLegacyPass();
 
 //===----------------------------------------------------------------------===//
 /// createIPSCCPPass - This pass propagates constants from call sites into the
@@ -210,6 +216,11 @@
 ModulePass *createHotColdSplittingPass();
 
 //===----------------------------------------------------------------------===//
+/// createIROutlinerPass - This pass finds similar code regions and factors
+/// those regions out into functions.
+ModulePass *createIROutlinerPass();
+
+//===----------------------------------------------------------------------===//
 /// createPartialInliningPass - This pass inlines parts of functions.
 ///
 ModulePass *createPartialInliningPass();
@@ -236,12 +247,15 @@
 /// The behavior depends on the summary arguments:
 /// - If ExportSummary is non-null, this pass will export type identifiers to
 ///   the given summary.
-/// - Otherwise, if ImportSummary is non-null, this pass will import type
-///   identifiers from the given summary.
-/// - Otherwise it does neither.
-/// It is invalid for both ExportSummary and ImportSummary to be non-null.
+/// - If ImportSummary is non-null, this pass will import type identifiers from
+///   the given summary.
+/// - Otherwise, if both are null and DropTypeTests is true, all type test
+///   assume sequences will be removed from the IR.
+/// It is invalid for both ExportSummary and ImportSummary to be non-null
+/// unless DropTypeTests is true.
 ModulePass *createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
-                                     const ModuleSummaryIndex *ImportSummary);
+                                     const ModuleSummaryIndex *ImportSummary,
+                                     bool DropTypeTests = false);
 
 /// This pass exports CFI checks for use by external modules.
 ModulePass *createCrossDSOCFIPass();
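To illustrate the extended signature, two hedged call sites (helper names are hypothetical); the defaulted DropTypeTests keeps existing callers source-compatible:

    #include "llvm/Transforms/IPO.h"

    llvm::ModulePass *makeExportingLTTPass(llvm::ModuleSummaryIndex *Export) {
      // Pre-existing mode: export type identifiers to the given summary.
      return llvm::createLowerTypeTestsPass(Export, /*ImportSummary=*/nullptr);
    }

    llvm::ModulePass *makeDropTypeTestsPass() {
      // New mode: with both summaries null, strip type test assume sequences.
      return llvm::createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
                                            /*ImportSummary=*/nullptr,
                                            /*DropTypeTests=*/true);
    }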
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
index 64e2523..6a208df 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
@@ -34,6 +34,7 @@
       : InsertLifetime(InsertLifetime) {}
 
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+  static bool isRequired() { return true; }
 };
 
 /// Create a legacy pass manager instance of a pass to inline and remove
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Annotation2Metadata.h b/linux-x64/clang/include/llvm/Transforms/IPO/Annotation2Metadata.h
new file mode 100644
index 0000000..cf7137b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Annotation2Metadata.h
@@ -0,0 +1,30 @@
+//===- Annotation2Metadata.h - Add !annotation metadata. --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// New pass manager pass to convert @llvm.global.annotations to !annotation
+// metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H
+#define LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to convert @llvm.global.annotations to !annotation metadata.
+struct Annotation2MetadataPass : public PassInfoMixin<Annotation2MetadataPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H
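A minimal sketch of scheduling the new pass (helper name illustrative); it slots directly into a ModulePassManager, and the opt pipeline name is expected to be annotation2metadata:

    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/IPO/Annotation2Metadata.h"

    void addAnnotationConversion(llvm::ModulePassManager &MPM) {
      MPM.addPass(llvm::Annotation2MetadataPass());
    }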
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
index c8afb7b..6d6cb58 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
@@ -14,6 +14,7 @@
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
+class TargetTransformInfo;
 
 /// Argument promotion pass.
 ///
@@ -26,6 +27,17 @@
 public:
   ArgumentPromotionPass(unsigned MaxElements = 3u) : MaxElements(MaxElements) {}
 
+  /// Check if callers and the callee \p F agree how promoted arguments would be
+  /// passed. Arguments they do not agree on are removed from the sets, but the
+  /// return value has to be checked as well.
+  static bool areFunctionArgsABICompatible(
+      const Function &F, const TargetTransformInfo &TTI,
+      SmallPtrSetImpl<Argument *> &ArgsToPromote,
+      SmallPtrSetImpl<Argument *> &ByValArgsToTransform);
+
+  /// Checks if a type could have padding bytes.
+  static bool isDenselyPacked(Type *type, const DataLayout &DL);
+
   PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                         LazyCallGraph &CG, CGSCCUpdateResult &UR);
 };
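A hedged sketch of how another transform might consult the new static helpers (the canPromote wrapper and its inputs are assumptions, not part of this diff):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Transforms/IPO/ArgumentPromotion.h"

    bool canPromote(const llvm::Function &F,
                    const llvm::TargetTransformInfo &TTI,
                    llvm::SmallPtrSetImpl<llvm::Argument *> &ArgsToPromote,
                    llvm::SmallPtrSetImpl<llvm::Argument *> &ByValArgs) {
      // Filters both sets down to ABI-compatible arguments; per the doc
      // comment, the return value must be checked as well.
      return llvm::ArgumentPromotionPass::areFunctionArgsABICompatible(
          F, TTI, ArgsToPromote, ByValArgs);
    }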
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Attributor.h b/linux-x64/clang/include/llvm/Transforms/IPO/Attributor.h
index 8f0f9eb..dbaf945 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/Attributor.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Attributor.h
@@ -29,7 +29,7 @@
 // automatically capture a potential dependence from Q to P. This dependence
 // will cause P to be reevaluated whenever Q changes in the future.
 //
-// The Attributor will only reevaluated abstract attributes that might have
+// The Attributor will only reevaluate abstract attributes that might have
 // changed since the last iteration. That means that the Attributor will not
 // revisit all instructions/blocks/functions in the module but only query
 // an update from a subset of the abstract attributes.
@@ -60,20 +60,20 @@
 // manifest their result in the IR for passes to come.
 //
 // Attribute manifestation is not mandatory. If desired, there is support to
-// generate a single LLVM-IR attribute already in the AbstractAttribute base
-// class. In the simplest case, a subclass overloads
-// `AbstractAttribute::getManifestPosition()` and
-// `AbstractAttribute::getAttrKind()` to return the appropriate values. The
-// Attributor manifestation framework will then create and place a new attribute
-// if it is allowed to do so (based on the abstract state). Other use cases can
-// be achieved by overloading other abstract attribute methods.
+// generate a single or multiple LLVM-IR attributes already in the helper struct
+// IRAttribute. In the simplest case, a subclass inherits from IRAttribute with
+// a proper Attribute::AttrKind as template parameter. The Attributor
+// manifestation framework will then create and place a new attribute if it is
+// allowed to do so (based on the abstract state). Other use cases can be
+// achieved by overloading AbstractAttribute or IRAttribute methods.
 //
 //
 // The "mechanics" of adding a new "abstract attribute":
 // - Define a class (transitively) inheriting from AbstractAttribute and one
 //   (which could be the same) that (transitively) inherits from AbstractState.
 //   For the latter, consider the already available BooleanState and
-//   IntegerState if they fit your needs, e.g., you require only a bit-encoding.
+//   {Inc,Dec,Bit}IntegerState if they fit your needs, e.g., you require only
+//   number tracking or bit-encoding.
 // - Implement all pure methods. Also use overloading if the attribute is not
 //   conforming with the "default" behavior: A (set of) LLVM-IR attribute(s) for
 //   an argument, call site argument, function return value, or function. See
@@ -97,19 +97,44 @@
 #ifndef LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
 #define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
 
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumeBundleQueries.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/CGSCCPassManager.h"
 #include "llvm/Analysis/LazyCallGraph.h"
-#include "llvm/IR/CallSite.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/AbstractCallSite.h"
+#include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Transforms/Utils/CallGraphUpdater.h"
 
 namespace llvm {
 
+struct AADepGraphNode;
+struct AADepGraph;
+struct Attributor;
 struct AbstractAttribute;
 struct InformationCache;
+struct AAIsDead;
 
+class AAManager;
+class AAResults;
 class Function;
 
-/// Simple enum class that forces the status to be spelled out explicitly.
-///
+/// The value passed to the command line option that defines the maximal
+/// initialization chain length.
+extern unsigned MaxInitializationChainLength;
+
 ///{
 enum class ChangeStatus {
   CHANGED,
@@ -118,8 +143,843 @@
 
 ChangeStatus operator|(ChangeStatus l, ChangeStatus r);
 ChangeStatus operator&(ChangeStatus l, ChangeStatus r);
+
+enum class DepClassTy {
+  REQUIRED,
+  OPTIONAL,
+};
 ///}
 
+/// The data structure for the nodes of a dependency graph
+struct AADepGraphNode {
+public:
+  virtual ~AADepGraphNode() {}
+  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
+
+protected:
+  /// Set of dependency graph nodes which should be updated if this one
+  /// is updated. The bit encodes if it is optional.
+  TinyPtrVector<DepTy> Deps;
+
+  static AADepGraphNode *DepGetVal(DepTy &DT) { return DT.getPointer(); }
+  static AbstractAttribute *DepGetValAA(DepTy &DT) {
+    return cast<AbstractAttribute>(DT.getPointer());
+  }
+
+  operator AbstractAttribute *() { return cast<AbstractAttribute>(this); }
+
+public:
+  using iterator =
+      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
+  using aaiterator =
+      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetValAA)>;
+
+  aaiterator begin() { return aaiterator(Deps.begin(), &DepGetValAA); }
+  aaiterator end() { return aaiterator(Deps.end(), &DepGetValAA); }
+  iterator child_begin() { return iterator(Deps.begin(), &DepGetVal); }
+  iterator child_end() { return iterator(Deps.end(), &DepGetVal); }
+
+  virtual void print(raw_ostream &OS) const { OS << "AADepNode Impl\n"; }
+  TinyPtrVector<DepTy> &getDeps() { return Deps; }
+
+  friend struct Attributor;
+  friend struct AADepGraph;
+};
+
+/// The data structure for the dependency graph
+///
+/// Note that in this graph an edge from A to B (A -> B) means that B
+/// depends on A: whenever the state of A is updated, node B should be
+/// updated as well.
+struct AADepGraph {
+  AADepGraph() {}
+  ~AADepGraph() {}
+
+  using DepTy = AADepGraphNode::DepTy;
+  static AADepGraphNode *DepGetVal(DepTy &DT) { return DT.getPointer(); }
+  using iterator =
+      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
+
+  /// There is no root node for the dependency graph. But the SCCIterator
+  /// requires a single entry point, so we maintain a fake ("synthetic") root
+  /// node that depends on every node.
+  AADepGraphNode SyntheticRoot;
+  AADepGraphNode *GetEntryNode() { return &SyntheticRoot; }
+
+  iterator begin() { return SyntheticRoot.child_begin(); }
+  iterator end() { return SyntheticRoot.child_end(); }
+
+  void viewGraph();
+
+  /// Dump graph to file
+  void dumpGraph();
+
+  /// Print dependency graph
+  void print();
+};
+
+/// Helper to describe and deal with positions in the LLVM-IR.
+///
+/// A position in the IR is described by an anchor value and an "offset" that
+/// could be the argument number, for call sites and arguments, or an indicator
+/// of the "position kind". The kinds, specified in the Kind enum below, include
+/// the locations in the attribute list, i.a., function scope and return value,
+/// as well as a distinction between call sites and functions. Finally, there
+/// are floating values that do not have a corresponding attribute list
+/// position.
+struct IRPosition {
+
+  /// The positions we distinguish in the IR.
+  enum Kind : char {
+    IRP_INVALID,  ///< An invalid position.
+    IRP_FLOAT,    ///< A position that is not associated with a spot suitable
+                  ///< for attributes. This could be any value or instruction.
+    IRP_RETURNED, ///< An attribute for the function return value.
+    IRP_CALL_SITE_RETURNED, ///< An attribute for a call site return value.
+    IRP_FUNCTION,           ///< An attribute for a function (scope).
+    IRP_CALL_SITE,          ///< An attribute for a call site (function scope).
+    IRP_ARGUMENT,           ///< An attribute for a function argument.
+    IRP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument.
+  };
+
+  /// Default constructor available to create invalid positions implicitly. All
+  /// other positions need to be created explicitly through the appropriate
+  /// static member function.
+  IRPosition() : Enc(nullptr, ENC_VALUE) { verify(); }
+
+  /// Create a position describing the value of \p V.
+  static const IRPosition value(const Value &V) {
+    if (auto *Arg = dyn_cast<Argument>(&V))
+      return IRPosition::argument(*Arg);
+    if (auto *CB = dyn_cast<CallBase>(&V))
+      return IRPosition::callsite_returned(*CB);
+    return IRPosition(const_cast<Value &>(V), IRP_FLOAT);
+  }
+
+  /// Create a position describing the function scope of \p F.
+  static const IRPosition function(const Function &F) {
+    return IRPosition(const_cast<Function &>(F), IRP_FUNCTION);
+  }
+
+  /// Create a position describing the returned value of \p F.
+  static const IRPosition returned(const Function &F) {
+    return IRPosition(const_cast<Function &>(F), IRP_RETURNED);
+  }
+
+  /// Create a position describing the argument \p Arg.
+  static const IRPosition argument(const Argument &Arg) {
+    return IRPosition(const_cast<Argument &>(Arg), IRP_ARGUMENT);
+  }
+
+  /// Create a position describing the function scope of \p CB.
+  static const IRPosition callsite_function(const CallBase &CB) {
+    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE);
+  }
+
+  /// Create a position describing the returned value of \p CB.
+  static const IRPosition callsite_returned(const CallBase &CB) {
+    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE_RETURNED);
+  }
+
+  /// Create a position describing the argument of \p CB at position \p ArgNo.
+  static const IRPosition callsite_argument(const CallBase &CB,
+                                            unsigned ArgNo) {
+    return IRPosition(const_cast<Use &>(CB.getArgOperandUse(ArgNo)),
+                      IRP_CALL_SITE_ARGUMENT);
+  }
+
+  /// Create a position describing the argument of \p ACS at position \p ArgNo.
+  static const IRPosition callsite_argument(AbstractCallSite ACS,
+                                            unsigned ArgNo) {
+    if (ACS.getNumArgOperands() <= ArgNo)
+      return IRPosition();
+    int CSArgNo = ACS.getCallArgOperandNo(ArgNo);
+    if (CSArgNo >= 0)
+      return IRPosition::callsite_argument(
+          cast<CallBase>(*ACS.getInstruction()), CSArgNo);
+    return IRPosition();
+  }
+
+  /// Create a position with function scope matching the "context" of \p IRP.
+  /// If \p IRP is a call site (see isAnyCallSitePosition()) then the result
+  /// will be a call site position, otherwise the function position of the
+  /// associated function.
+  static const IRPosition function_scope(const IRPosition &IRP) {
+    if (IRP.isAnyCallSitePosition()) {
+      return IRPosition::callsite_function(
+          cast<CallBase>(IRP.getAnchorValue()));
+    }
+    assert(IRP.getAssociatedFunction());
+    return IRPosition::function(*IRP.getAssociatedFunction());
+  }
+
+  bool operator==(const IRPosition &RHS) const { return Enc == RHS.Enc; }
+  bool operator!=(const IRPosition &RHS) const { return !(*this == RHS); }
+
+  /// Return the value this abstract attribute is anchored with.
+  ///
+  /// The anchor value might not be the associated value if the latter is not
+  /// sufficient to determine where arguments will be manifested. This is, so
+  /// far, only the case for call site arguments as the value is not sufficient
+  /// to pinpoint them. Instead, we can use the call site as an anchor.
+  Value &getAnchorValue() const {
+    switch (getEncodingBits()) {
+    case ENC_VALUE:
+    case ENC_RETURNED_VALUE:
+    case ENC_FLOATING_FUNCTION:
+      return *getAsValuePtr();
+    case ENC_CALL_SITE_ARGUMENT_USE:
+      return *(getAsUsePtr()->getUser());
+    default:
+      llvm_unreachable("Unkown encoding!");
+    };
+  }
+
+  /// Return the associated function, if any.
+  Function *getAssociatedFunction() const {
+    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue())) {
+      // We reuse the logic that associates callback callees to arguments of a
+      // call site here to identify the callback callee as the associated
+      // function.
+      if (Argument *Arg = getAssociatedArgument())
+        return Arg->getParent();
+      return CB->getCalledFunction();
+    }
+    return getAnchorScope();
+  }
+
+  /// Return the associated argument, if any.
+  Argument *getAssociatedArgument() const;
+
+  /// Return true if the position refers to a function interface, that is the
+  /// function scope, the function return, or an argument.
+  bool isFnInterfaceKind() const {
+    switch (getPositionKind()) {
+    case IRPosition::IRP_FUNCTION:
+    case IRPosition::IRP_RETURNED:
+    case IRPosition::IRP_ARGUMENT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Return the Function surrounding the anchor value.
+  Function *getAnchorScope() const {
+    Value &V = getAnchorValue();
+    if (isa<Function>(V))
+      return &cast<Function>(V);
+    if (isa<Argument>(V))
+      return cast<Argument>(V).getParent();
+    if (isa<Instruction>(V))
+      return cast<Instruction>(V).getFunction();
+    return nullptr;
+  }
+
+  /// Return the context instruction, if any.
+  Instruction *getCtxI() const {
+    Value &V = getAnchorValue();
+    if (auto *I = dyn_cast<Instruction>(&V))
+      return I;
+    if (auto *Arg = dyn_cast<Argument>(&V))
+      if (!Arg->getParent()->isDeclaration())
+        return &Arg->getParent()->getEntryBlock().front();
+    if (auto *F = dyn_cast<Function>(&V))
+      if (!F->isDeclaration())
+        return &(F->getEntryBlock().front());
+    return nullptr;
+  }
+
+  /// Return the value this abstract attribute is associated with.
+  Value &getAssociatedValue() const {
+    if (getCallSiteArgNo() < 0 || isa<Argument>(&getAnchorValue()))
+      return getAnchorValue();
+    assert(isa<CallBase>(&getAnchorValue()) && "Expected a call base!");
+    return *cast<CallBase>(&getAnchorValue())
+                ->getArgOperand(getCallSiteArgNo());
+  }
+
+  /// Return the type this abstract attribute is associated with.
+  Type *getAssociatedType() const {
+    if (getPositionKind() == IRPosition::IRP_RETURNED)
+      return getAssociatedFunction()->getReturnType();
+    return getAssociatedValue().getType();
+  }
+
+  /// Return the callee argument number of the associated value if it is an
+  /// argument or call site argument, otherwise a negative value. In contrast to
+  /// `getCallSiteArgNo` this method will always return the "argument number"
+  /// from the perspective of the callee. This may not be the same as the call
+  /// site if this is a callback call.
+  int getCalleeArgNo() const {
+    return getArgNo(/* CallbackCalleeArgIfApplicable */ true);
+  }
+
+  /// Return the call site argument number of the associated value if it is an
+  /// argument or call site argument, otherwise a negative value. In contrast to
+  /// `getCalleeArgNo` this method will always return the "operand number" from
+  /// the perspective of the call site. This may not be the same as the callee
+  /// perspective if this is a callback call.
+  int getCallSiteArgNo() const {
+    return getArgNo(/* CallbackCalleeArgIfApplicable */ false);
+  }
+
+  /// Return the index in the attribute list for this position.
+  unsigned getAttrIdx() const {
+    switch (getPositionKind()) {
+    case IRPosition::IRP_INVALID:
+    case IRPosition::IRP_FLOAT:
+      break;
+    case IRPosition::IRP_FUNCTION:
+    case IRPosition::IRP_CALL_SITE:
+      return AttributeList::FunctionIndex;
+    case IRPosition::IRP_RETURNED:
+    case IRPosition::IRP_CALL_SITE_RETURNED:
+      return AttributeList::ReturnIndex;
+    case IRPosition::IRP_ARGUMENT:
+    case IRPosition::IRP_CALL_SITE_ARGUMENT:
+      return getCallSiteArgNo() + AttributeList::FirstArgIndex;
+    }
+    llvm_unreachable(
+        "There is no attribute index for a floating or invalid position!");
+  }
+
+  /// Return the associated position kind.
+  Kind getPositionKind() const {
+    char EncodingBits = getEncodingBits();
+    if (EncodingBits == ENC_CALL_SITE_ARGUMENT_USE)
+      return IRP_CALL_SITE_ARGUMENT;
+    if (EncodingBits == ENC_FLOATING_FUNCTION)
+      return IRP_FLOAT;
+
+    Value *V = getAsValuePtr();
+    if (!V)
+      return IRP_INVALID;
+    if (isa<Argument>(V))
+      return IRP_ARGUMENT;
+    if (isa<Function>(V))
+      return isReturnPosition(EncodingBits) ? IRP_RETURNED : IRP_FUNCTION;
+    if (isa<CallBase>(V))
+      return isReturnPosition(EncodingBits) ? IRP_CALL_SITE_RETURNED
+                                            : IRP_CALL_SITE;
+    return IRP_FLOAT;
+  }
+
+  /// TODO: Figure out if the attribute related helper functions should live
+  ///       here or somewhere else.
+
+  /// Return true if an attribute of any kind in \p AKs exists in the IR at a
+  /// position that will affect this one. See also getAttrs(...).
+  /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
+  ///                                 e.g., the function position if this is an
+  ///                                 argument position, should be ignored.
+  bool hasAttr(ArrayRef<Attribute::AttrKind> AKs,
+               bool IgnoreSubsumingPositions = false,
+               Attributor *A = nullptr) const;
+
+  /// Return the attributes of any kind in \p AKs existing in the IR at a
+  /// position that will affect this one. While each position can only have a
+  /// single attribute of any kind in \p AKs, there are "subsuming" positions
+  /// that could have an attribute as well. This method collects all attributes
+  /// it finds in \p Attrs.
+  /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
+  ///                                 e.g., the function position if this is an
+  ///                                 argument position, should be ignored.
+  void getAttrs(ArrayRef<Attribute::AttrKind> AKs,
+                SmallVectorImpl<Attribute> &Attrs,
+                bool IgnoreSubsumingPositions = false,
+                Attributor *A = nullptr) const;
+
+  /// Remove the attributes of the kinds in \p AKs existing at this position.
+  void removeAttrs(ArrayRef<Attribute::AttrKind> AKs) const {
+    if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
+      return;
+
+    AttributeList AttrList;
+    auto *CB = dyn_cast<CallBase>(&getAnchorValue());
+    if (CB)
+      AttrList = CB->getAttributes();
+    else
+      AttrList = getAssociatedFunction()->getAttributes();
+
+    LLVMContext &Ctx = getAnchorValue().getContext();
+    for (Attribute::AttrKind AK : AKs)
+      AttrList = AttrList.removeAttribute(Ctx, getAttrIdx(), AK);
+
+    if (CB)
+      CB->setAttributes(AttrList);
+    else
+      getAssociatedFunction()->setAttributes(AttrList);
+  }
+
+  bool isAnyCallSitePosition() const {
+    switch (getPositionKind()) {
+    case IRPosition::IRP_CALL_SITE:
+    case IRPosition::IRP_CALL_SITE_RETURNED:
+    case IRPosition::IRP_CALL_SITE_ARGUMENT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Return true if the position is an argument or call site argument.
+  bool isArgumentPosition() const {
+    switch (getPositionKind()) {
+    case IRPosition::IRP_ARGUMENT:
+    case IRPosition::IRP_CALL_SITE_ARGUMENT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Special DenseMap key values.
+  ///
+  ///{
+  static const IRPosition EmptyKey;
+  static const IRPosition TombstoneKey;
+  ///}
+
+  /// Conversion into a void * to allow reuse of pointer hashing.
+  operator void *() const { return Enc.getOpaqueValue(); }
+
+private:
+  /// Private constructor for special values only!
+  explicit IRPosition(void *Ptr) { Enc.setFromOpaqueValue(Ptr); }
+
+  /// IRPosition anchored at \p AnchorVal with kind/argument number \p PK.
+  explicit IRPosition(Value &AnchorVal, Kind PK) {
+    switch (PK) {
+    case IRPosition::IRP_INVALID:
+      llvm_unreachable("Cannot create invalid IRP with an anchor value!");
+      break;
+    case IRPosition::IRP_FLOAT:
+      // Special case for floating functions.
+      if (isa<Function>(AnchorVal))
+        Enc = {&AnchorVal, ENC_FLOATING_FUNCTION};
+      else
+        Enc = {&AnchorVal, ENC_VALUE};
+      break;
+    case IRPosition::IRP_FUNCTION:
+    case IRPosition::IRP_CALL_SITE:
+      Enc = {&AnchorVal, ENC_VALUE};
+      break;
+    case IRPosition::IRP_RETURNED:
+    case IRPosition::IRP_CALL_SITE_RETURNED:
+      Enc = {&AnchorVal, ENC_RETURNED_VALUE};
+      break;
+    case IRPosition::IRP_ARGUMENT:
+      Enc = {&AnchorVal, ENC_VALUE};
+      break;
+    case IRPosition::IRP_CALL_SITE_ARGUMENT:
+      llvm_unreachable(
+          "Cannot create call site argument IRP with an anchor value!");
+      break;
+    }
+    verify();
+  }
+
+  /// Return the callee argument number of the associated value if it is an
+  /// argument or call site argument. See also `getCalleeArgNo` and
+  /// `getCallSiteArgNo`.
+  int getArgNo(bool CallbackCalleeArgIfApplicable) const {
+    if (CallbackCalleeArgIfApplicable)
+      if (Argument *Arg = getAssociatedArgument())
+        return Arg->getArgNo();
+    switch (getPositionKind()) {
+    case IRPosition::IRP_ARGUMENT:
+      return cast<Argument>(getAsValuePtr())->getArgNo();
+    case IRPosition::IRP_CALL_SITE_ARGUMENT: {
+      Use &U = *getAsUsePtr();
+      return cast<CallBase>(U.getUser())->getArgOperandNo(&U);
+    }
+    default:
+      return -1;
+    }
+  }
+
+  /// IRPosition for the use \p U. The position kind \p PK needs to be
+  /// IRP_CALL_SITE_ARGUMENT, the anchor value is the user, the associated value
+  /// the used value.
+  explicit IRPosition(Use &U, Kind PK) {
+    assert(PK == IRP_CALL_SITE_ARGUMENT &&
+           "Use constructor is for call site arguments only!");
+    Enc = {&U, ENC_CALL_SITE_ARGUMENT_USE};
+    verify();
+  }
+
+  /// Verify internal invariants.
+  void verify();
+
+  /// Return the attributes of kind \p AK existing in the IR as attributes.
+  bool getAttrsFromIRAttr(Attribute::AttrKind AK,
+                          SmallVectorImpl<Attribute> &Attrs) const;
+
+  /// Return the attributes of kind \p AK existing in the IR as operand bundles
+  /// of an llvm.assume.
+  bool getAttrsFromAssumes(Attribute::AttrKind AK,
+                           SmallVectorImpl<Attribute> &Attrs,
+                           Attributor &A) const;
+
+  /// Return the underlying pointer as Value *, valid for all positions but
+  /// IRP_CALL_SITE_ARGUMENT.
+  Value *getAsValuePtr() const {
+    assert(getEncodingBits() != ENC_CALL_SITE_ARGUMENT_USE &&
+           "Not a value pointer!");
+    return reinterpret_cast<Value *>(Enc.getPointer());
+  }
+
+  /// Return the underlying pointer as Use *, valid only for
+  /// IRP_CALL_SITE_ARGUMENT positions.
+  Use *getAsUsePtr() const {
+    assert(getEncodingBits() == ENC_CALL_SITE_ARGUMENT_USE &&
+           "Not a value pointer!");
+    return reinterpret_cast<Use *>(Enc.getPointer());
+  }
+
+  /// Return true if \p EncodingBits describe a returned or call site returned
+  /// position.
+  static bool isReturnPosition(char EncodingBits) {
+    return EncodingBits == ENC_RETURNED_VALUE;
+  }
+
+  /// Return true if the encoding bits describe a returned or call site returned
+  /// position.
+  bool isReturnPosition() const { return isReturnPosition(getEncodingBits()); }
+
+  /// The encoding of the IRPosition is a combination of a pointer and two
+  /// encoding bits. The values of the encoding bits are defined in the enum
+  /// below. The pointer is either a Value* (for the first three encoding bit
+  /// combinations) or Use* (for ENC_CALL_SITE_ARGUMENT_USE).
+  ///
+  ///{
+  enum {
+    ENC_VALUE = 0b00,
+    ENC_RETURNED_VALUE = 0b01,
+    ENC_FLOATING_FUNCTION = 0b10,
+    ENC_CALL_SITE_ARGUMENT_USE = 0b11,
+  };
+
+  // Reserve the maximal amount of bits so there is no need to mask out the
+  // remaining ones. We will not encode anything else in the pointer anyway.
+  static constexpr int NumEncodingBits =
+      PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
+  static_assert(NumEncodingBits >= 2, "At least two bits are required!");
+
+  /// The pointer with the encoding bits.
+  PointerIntPair<void *, NumEncodingBits, char> Enc;
+  ///}
+
+  /// Return the encoding bits.
+  char getEncodingBits() const { return Enc.getInt(); }
+};
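To make the position kinds concrete, a short hypothetical snippet using the factory methods above (F, CB, and V stand for IR objects the querying pass already has; CB is assumed to have at least one argument):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/Transforms/IPO/Attributor.h"

    void describePositions(llvm::Function &F, llvm::CallBase &CB,
                           llvm::Value &V) {
      using llvm::IRPosition;
      IRPosition FnPos  = IRPosition::function(F);  // function scope
      IRPosition RetPos = IRPosition::returned(F);  // function return value
      IRPosition CSArg  = IRPosition::callsite_argument(CB, /*ArgNo=*/0);
      // value() floats unless V is an Argument or CallBase, in which case it
      // canonicalizes to the argument or call site returned position.
      IRPosition ValPos = IRPosition::value(V);
      (void)FnPos; (void)RetPos; (void)CSArg; (void)ValPos;
    }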
+
+/// Helper that allows IRPosition as a key in a DenseMap.
+template <> struct DenseMapInfo<IRPosition> : DenseMapInfo<void *> {
+  static inline IRPosition getEmptyKey() { return IRPosition::EmptyKey; }
+  static inline IRPosition getTombstoneKey() {
+    return IRPosition::TombstoneKey;
+  }
+};
+
+/// A visitor class for IR positions.
+///
+/// Given a position P, the SubsumingPositionIterator allows visiting "subsuming
+/// positions" wrt. attributes/information. Thus, if a piece of information
+/// holds for a subsuming position, it also holds for the position P.
+///
+/// The subsuming positions always include the initial position and then,
+/// depending on the position kind, additionally the following ones:
+/// - for IRP_RETURNED:
+///   - the function (IRP_FUNCTION)
+/// - for IRP_ARGUMENT:
+///   - the function (IRP_FUNCTION)
+/// - for IRP_CALL_SITE:
+///   - the callee (IRP_FUNCTION), if known
+/// - for IRP_CALL_SITE_RETURNED:
+///   - the callee (IRP_RETURNED), if known
+///   - the call site (IRP_FUNCTION)
+///   - the callee (IRP_FUNCTION), if known
+/// - for IRP_CALL_SITE_ARGUMENT:
+///   - the argument of the callee (IRP_ARGUMENT), if known
+///   - the callee (IRP_FUNCTION), if known
+///   - the position the call site argument is associated with if it is not
+///     anchored to the call site, e.g., if it is an argument then the argument
+///     (IRP_ARGUMENT)
+class SubsumingPositionIterator {
+  SmallVector<IRPosition, 4> IRPositions;
+  using iterator = decltype(IRPositions)::iterator;
+
+public:
+  SubsumingPositionIterator(const IRPosition &IRP);
+  iterator begin() { return IRPositions.begin(); }
+  iterator end() { return IRPositions.end(); }
+};
+
+/// Wrapper for FunctionAnalysisManager.
+struct AnalysisGetter {
+  template <typename Analysis>
+  typename Analysis::Result *getAnalysis(const Function &F) {
+    if (!FAM || !F.getParent())
+      return nullptr;
+    return &FAM->getResult<Analysis>(const_cast<Function &>(F));
+  }
+
+  AnalysisGetter(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+  AnalysisGetter() {}
+
+private:
+  FunctionAnalysisManager *FAM = nullptr;
+};
+
+/// Data structure to hold cached (LLVM-IR) information.
+///
+/// All attributes are given an InformationCache object at creation time to
+/// avoid inspection of the IR by all of them individually. This default
+/// InformationCache will hold information required by 'default' attributes,
+/// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
+/// is called.
+///
+/// If custom abstract attributes, registered manually through
+/// Attributor::registerAA(...), need more information, especially if it is not
+/// reusable, it is advised to inherit from the InformationCache and cast the
+/// instance down in the abstract attributes.
+struct InformationCache {
+  InformationCache(const Module &M, AnalysisGetter &AG,
+                   BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC)
+      : DL(M.getDataLayout()), Allocator(Allocator),
+        Explorer(
+            /* ExploreInterBlock */ true, /* ExploreCFGForward */ true,
+            /* ExploreCFGBackward */ true,
+            /* LIGetter */
+            [&](const Function &F) { return AG.getAnalysis<LoopAnalysis>(F); },
+            /* DTGetter */
+            [&](const Function &F) {
+              return AG.getAnalysis<DominatorTreeAnalysis>(F);
+            },
+            /* PDTGetter */
+            [&](const Function &F) {
+              return AG.getAnalysis<PostDominatorTreeAnalysis>(F);
+            }),
+        AG(AG), CGSCC(CGSCC) {
+    if (CGSCC)
+      initializeModuleSlice(*CGSCC);
+  }
+
+  ~InformationCache() {
+    // The FunctionInfo objects are allocated via a BumpPtrAllocator, so we
+    // call the destructor manually.
+    for (auto &It : FuncInfoMap)
+      It.getSecond()->~FunctionInfo();
+  }
+
+  /// Apply \p CB to all uses of \p F. If \p LookThroughConstantExprUses is
+  /// true, constant expression users are not given to \p CB but their uses are
+  /// traversed transitively.
+  template <typename CBTy>
+  static void foreachUse(Function &F, CBTy CB,
+                         bool LookThroughConstantExprUses = true) {
+    SmallVector<Use *, 8> Worklist(make_pointer_range(F.uses()));
+
+    for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) {
+      Use &U = *Worklist[Idx];
+
+      // Allow use in constant bitcasts and simply look through them.
+      if (LookThroughConstantExprUses && isa<ConstantExpr>(U.getUser())) {
+        for (Use &CEU : cast<ConstantExpr>(U.getUser())->uses())
+          Worklist.push_back(&CEU);
+        continue;
+      }
+
+      CB(U);
+    }
+  }
+
+  /// Initialize the ModuleSlice member based on \p SCC. ModuleSlice contains
+  /// (a subset of) all functions that we can look at during this SCC traversal.
+  /// This includes functions (transitively) called from the SCC and the
+  /// (transitive) callers of SCC functions. We also can look at a function if
+  /// there is a "reference edge", i.e., if the function somehow uses (!=calls)
+  /// a function in the SCC or a caller of a function in the SCC.
+  void initializeModuleSlice(SetVector<Function *> &SCC) {
+    ModuleSlice.insert(SCC.begin(), SCC.end());
+
+    SmallPtrSet<Function *, 16> Seen;
+    SmallVector<Function *, 16> Worklist(SCC.begin(), SCC.end());
+    while (!Worklist.empty()) {
+      Function *F = Worklist.pop_back_val();
+      ModuleSlice.insert(F);
+
+      for (Instruction &I : instructions(*F))
+        if (auto *CB = dyn_cast<CallBase>(&I))
+          if (Function *Callee = CB->getCalledFunction())
+            if (Seen.insert(Callee).second)
+              Worklist.push_back(Callee);
+    }
+
+    Seen.clear();
+    Worklist.append(SCC.begin(), SCC.end());
+    while (!Worklist.empty()) {
+      Function *F = Worklist.pop_back_val();
+      ModuleSlice.insert(F);
+
+      // Traverse all transitive uses.
+      foreachUse(*F, [&](Use &U) {
+        if (auto *UsrI = dyn_cast<Instruction>(U.getUser()))
+          if (Seen.insert(UsrI->getFunction()).second)
+            Worklist.push_back(UsrI->getFunction());
+      });
+    }
+  }
+
+  /// The slice of the module we are allowed to look at.
+  SmallPtrSet<Function *, 8> ModuleSlice;
+
+  /// A vector type to hold instructions.
+  using InstructionVectorTy = SmallVector<Instruction *, 8>;
+
+  /// A map type from opcodes to instructions with this opcode.
+  using OpcodeInstMapTy = DenseMap<unsigned, InstructionVectorTy *>;
+
+  /// Return the map that relates "interesting" opcodes with all instructions
+  /// with that opcode in \p F.
+  OpcodeInstMapTy &getOpcodeInstMapForFunction(const Function &F) {
+    return getFunctionInfo(F).OpcodeInstMap;
+  }
+
+  /// Return the instructions in \p F that may read or write memory.
+  InstructionVectorTy &getReadOrWriteInstsForFunction(const Function &F) {
+    return getFunctionInfo(F).RWInsts;
+  }
+
+  /// Return the MustBeExecutedContextExplorer.
+  MustBeExecutedContextExplorer &getMustBeExecutedContextExplorer() {
+    return Explorer;
+  }
+
+  /// Return TargetLibraryInfo for function \p F.
+  TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
+    return AG.getAnalysis<TargetLibraryAnalysis>(F);
+  }
+
+  /// Return AliasAnalysis Result for function \p F.
+  AAResults *getAAResultsForFunction(const Function &F);
+
+  /// Return true if \p Arg is involved in a must-tail call, i.e., it is an
+  /// argument of the caller or the callee of such a call.
+  bool isInvolvedInMustTailCall(const Argument &Arg) {
+    FunctionInfo &FI = getFunctionInfo(*Arg.getParent());
+    return FI.CalledViaMustTail || FI.ContainsMustTailCall;
+  }
+
+  /// Return the analysis result from a pass \p AP for function \p F.
+  template <typename AP>
+  typename AP::Result *getAnalysisResultForFunction(const Function &F) {
+    return AG.getAnalysis<AP>(F);
+  }
+
+  /// Return SCC size on call graph for function \p F or 0 if unknown.
+  unsigned getSccSize(const Function &F) {
+    if (CGSCC && CGSCC->count(const_cast<Function *>(&F)))
+      return CGSCC->size();
+    return 0;
+  }
+
+  /// Return the datalayout used in the module.
+  const DataLayout &getDL() { return DL; }
+
+  /// Return the map containing all the knowledge we have from `llvm.assume`s.
+  const RetainedKnowledgeMap &getKnowledgeMap() const { return KnowledgeMap; }
+
+  /// Return true if \p To is potentially reachable from \p From.
+  /// If the same query was answered before, return the cached result.
+  bool getPotentiallyReachable(const Instruction &From, const Instruction &To) {
+    auto KeyPair = std::make_pair(&From, &To);
+    auto Iter = PotentiallyReachableMap.find(KeyPair);
+    if (Iter != PotentiallyReachableMap.end())
+      return Iter->second;
+    const Function &F = *From.getFunction();
+    bool Result = isPotentiallyReachable(
+        &From, &To, nullptr, AG.getAnalysis<DominatorTreeAnalysis>(F),
+        AG.getAnalysis<LoopAnalysis>(F));
+    PotentiallyReachableMap.insert(std::make_pair(KeyPair, Result));
+    return Result;
+  }
+
+  /// Check whether \p F is part of the module slice.
+  bool isInModuleSlice(const Function &F) {
+    return ModuleSlice.count(const_cast<Function *>(&F));
+  }
+
+private:
+  struct FunctionInfo {
+    ~FunctionInfo();
+
+    /// A nested map that remembers all instructions in a function with a
+    /// certain instruction opcode (Instruction::getOpcode()).
+    OpcodeInstMapTy OpcodeInstMap;
+
+    /// A map from functions to their instructions that may read or write
+    /// memory.
+    InstructionVectorTy RWInsts;
+
+    /// Function is called by a `musttail` call.
+    bool CalledViaMustTail;
+
+    /// Function contains a `musttail` call.
+    bool ContainsMustTailCall;
+  };
+
+  /// A map type from functions to information about them.
+  DenseMap<const Function *, FunctionInfo *> FuncInfoMap;
+
+  /// Return information about the function \p F, potentially by creating it.
+  FunctionInfo &getFunctionInfo(const Function &F) {
+    FunctionInfo *&FI = FuncInfoMap[&F];
+    if (!FI) {
+      FI = new (Allocator) FunctionInfo();
+      initializeInformationCache(F, *FI);
+    }
+    return *FI;
+  }
+
+  /// Initialize the function information cache \p FI for the function \p F.
+  ///
+  /// This method needs to be called for all functions that might be looked at
+  /// through the information cache interface *prior* to looking at them.
+  void initializeInformationCache(const Function &F, FunctionInfo &FI);
+
+  /// The datalayout used in the module.
+  const DataLayout &DL;
+
+  /// The allocator used to allocate memory, e.g. for `FunctionInfo`s.
+  BumpPtrAllocator &Allocator;
+
+  /// MustBeExecutedContextExplorer
+  MustBeExecutedContextExplorer Explorer;
+
+  /// A map with knowledge retained in `llvm.assume` instructions.
+  RetainedKnowledgeMap KnowledgeMap;
+
+  /// Getters for analysis.
+  AnalysisGetter &AG;
+
+  /// The underlying CGSCC, or null if not available.
+  SetVector<Function *> *CGSCC;
+
+  /// Set of inlineable functions
+  SmallPtrSet<const Function *, 8> InlineableFunctions;
+
+  /// A map for caching results of queries for isPotentiallyReachable
+  DenseMap<std::pair<const Instruction *, const Instruction *>, bool>
+      PotentiallyReachableMap;
+
+  /// Give the Attributor access to the members so
+  /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
+  friend struct Attributor;
+};
+
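As a usage illustration for the static foreachUse helper above, a hedged sketch that counts direct call uses of a function (names are hypothetical):

    #include "llvm/IR/InstrTypes.h"
    #include "llvm/Transforms/IPO/Attributor.h"

    static unsigned countDirectCallUses(llvm::Function &F) {
      unsigned NumCalls = 0;
      // Constant-expression users are looked through by default, so uses
      // reached via bitcasts of F are visited as well.
      llvm::InformationCache::foreachUse(F, [&](llvm::Use &U) {
        if (auto *CB = llvm::dyn_cast<llvm::CallBase>(U.getUser()))
          if (CB->isCallee(&U))
            ++NumCalls;
      });
      return NumCalls;
    }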
 /// The fixpoint analysis framework that orchestrates the attribute deduction.
 ///
 /// The Attributor provides a general abstract analysis framework (guided
@@ -148,7 +1008,20 @@
 /// NOTE: The mechanics of adding a new "concrete" abstract attribute are
 ///       described in the file comment.
 struct Attributor {
-  ~Attributor() { DeleteContainerPointers(AllAbstractAttributes); }
+  /// Constructor
+  ///
+  /// \param Functions The set of functions we are deriving attributes for.
+  /// \param InfoCache Cache to hold various information accessible for
+  ///                  the abstract attributes.
+  /// \param CGUpdater Helper to update an underlying call graph.
+  /// \param Allowed If not null, a set limiting the attribute opportunities.
+  Attributor(SetVector<Function *> &Functions, InformationCache &InfoCache,
+             CallGraphUpdater &CGUpdater,
+             DenseSet<const char *> *Allowed = nullptr)
+      : Allocator(InfoCache.Allocator), Functions(Functions),
+        InfoCache(InfoCache), CGUpdater(CGUpdater), Allowed(Allowed) {}
+
+  ~Attributor();
 
   /// Run the analyses until a fixpoint is reached or enforced (timeout).
   ///
@@ -158,10 +1031,11 @@
   /// \Returns CHANGED if the IR was changed, otherwise UNCHANGED.
   ChangeStatus run();
 
-  /// Lookup an abstract attribute of type \p AAType anchored at value \p V and
-  /// argument number \p ArgNo. If no attribute is found and \p V is a call base
-  /// instruction, the called function is tried as a value next. Thus, the
-  /// returned abstract attribute might be anchored at the callee of \p V.
+  /// Lookup an abstract attribute of type \p AAType at position \p IRP. While
+  /// no abstract attribute is found, equivalent positions are checked; see
+  /// SubsumingPositionIterator. Thus, the returned abstract attribute
+  /// might be anchored at a different position, e.g., the callee if \p IRP is a
+  /// call base.
   ///
   /// This method is the only (supported) way an abstract attribute can retrieve
   /// information from another abstract attribute. As an example, take an
@@ -170,160 +1044,652 @@
   /// most optimistic information for other abstract attributes in-flight, e.g.
   /// the one reasoning about the "captured" state for the argument or the one
   /// reasoning on the memory access behavior of the function as a whole.
+  ///
+  /// If the flag \p TrackDependence is set to false the dependence from
+  /// \p QueryingAA to the returned abstract attribute is not automatically
+  /// recorded. This should only be used if the caller will record the
+  /// dependence explicitly if necessary, that is, if the returned abstract
+  /// attribute is used for reasoning. To record the dependences explicitly use
+  /// the `Attributor::recordDependence` method.
   template <typename AAType>
-  const AAType *getAAFor(AbstractAttribute &QueryingAA, const Value &V,
-                         int ArgNo = -1) {
-    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
-                  "Cannot query an attribute with a type not derived from "
-                  "'AbstractAttribute'!");
-    assert(AAType::ID != Attribute::None &&
-           "Cannot lookup generic abstract attributes!");
+  const AAType &getAAFor(const AbstractAttribute &QueryingAA,
+                         const IRPosition &IRP, bool TrackDependence = true,
+                         DepClassTy DepClass = DepClassTy::REQUIRED) {
+    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, TrackDependence, DepClass,
+                                    /* ForceUpdate */ false);
+  }
 
-    // Determine the argument number automatically for llvm::Arguments.
-    if (auto *Arg = dyn_cast<Argument>(&V))
-      ArgNo = Arg->getArgNo();
+  /// Similar to getAAFor but the returned abstract attribute will be updated (via
+  /// `AbstractAttribute::update`) even if it is found in the cache. This is
+  /// especially useful for AAIsDead as changes in liveness can make updates
+  /// possible/useful that were not happening before as the abstract attribute
+  /// was assumed dead.
+  template <typename AAType>
+  const AAType &getAndUpdateAAFor(const AbstractAttribute &QueryingAA,
+                                  const IRPosition &IRP,
+                                  bool TrackDependence = true,
+                                  DepClassTy DepClass = DepClassTy::REQUIRED) {
+    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, TrackDependence, DepClass,
+                                    /* ForceUpdate */ true);
+  }
 
-    // If a function was given together with an argument number, perform the
-    // lookup for the actual argument instead. Don't do it for variadic
-    // arguments.
-    if (ArgNo >= 0 && isa<Function>(&V) &&
-        cast<Function>(&V)->arg_size() > (size_t)ArgNo)
-      return getAAFor<AAType>(
-          QueryingAA, *(cast<Function>(&V)->arg_begin() + ArgNo), ArgNo);
+  /// A version of getAAFor that allows one to omit the querying abstract
+  /// attribute. Using this after the Attributor started running is restricted
+  /// to the Attributor itself. Initial seeding of AAs can be done via this
+  /// function.
+  /// NOTE: ForceUpdate is ignored in any stage other than the update stage.
+  template <typename AAType>
+  const AAType &getOrCreateAAFor(const IRPosition &IRP,
+                                 const AbstractAttribute *QueryingAA = nullptr,
+                                 bool TrackDependence = false,
+                                 DepClassTy DepClass = DepClassTy::OPTIONAL,
+                                 bool ForceUpdate = false) {
+    if (AAType *AAPtr = lookupAAFor<AAType>(IRP, QueryingAA, TrackDependence)) {
+      if (ForceUpdate && Phase == AttributorPhase::UPDATE)
+        updateAA(*AAPtr);
+      return *AAPtr;
+    }
 
-    // Lookup the abstract attribute of type AAType. If found, return it after
-    // registering a dependence of QueryingAA on the one returned attribute.
-    const auto &KindToAbstractAttributeMap = AAMap.lookup({&V, ArgNo});
-    if (AAType *AA = static_cast<AAType *>(
-            KindToAbstractAttributeMap.lookup(AAType::ID))) {
-      QueryMap[AA].insert(&QueryingAA);
+    // No matching attribute found, create one.
+    // Use the static create method.
+    auto &AA = AAType::createForPosition(IRP, *this);
+
+    // If we are currently seeding attributes, enforce seeding rules.
+    if (Phase == AttributorPhase::SEEDING && !shouldSeedAttribute(AA)) {
+      AA.getState().indicatePessimisticFixpoint();
       return AA;
     }
 
-    // If no abstract attribute was found and we look for a call site argument,
-    // defer to the actual argument instead.
-    ImmutableCallSite ICS(&V);
-    if (ICS && ICS.getCalledValue())
-      return getAAFor<AAType>(QueryingAA, *ICS.getCalledValue(), ArgNo);
+    registerAA(AA);
 
-    // No matching attribute found
-    return nullptr;
+    // Invalidate AA kinds that are not in the Allowed set and, for now, also
+    // naked and optnone functions.
+    bool Invalidate = Allowed && !Allowed->count(&AAType::ID);
+    const Function *FnScope = IRP.getAnchorScope();
+    if (FnScope)
+      Invalidate |= FnScope->hasFnAttribute(Attribute::Naked) ||
+                    FnScope->hasFnAttribute(Attribute::OptimizeNone);
+
+    // Avoid too many nested initializations to prevent a stack overflow.
+    Invalidate |= InitializationChainLength > MaxInitializationChainLength;
+
+    // Bootstrap the new attribute with an initial update to propagate
+    // information, e.g., function -> call site. If it is not in the given
+    // Allowed set we will not perform updates at all.
+    if (Invalidate) {
+      AA.getState().indicatePessimisticFixpoint();
+      return AA;
+    }
+
+    {
+      TimeTraceScope TimeScope(AA.getName() + "::initialize");
+      ++InitializationChainLength;
+      AA.initialize(*this);
+      --InitializationChainLength;
+    }
+
+    // Initialize and update is allowed for code outside of the current
+    // function set, but only if it is part of the module slice we are allowed
+    // to look at. The only exception is AAIsDeadFunction whose initialization
+    // is prevented directly, since we don't want to compute it twice.
+    if (FnScope && !Functions.count(const_cast<Function *>(FnScope))) {
+      if (!getInfoCache().isInModuleSlice(*FnScope)) {
+        AA.getState().indicatePessimisticFixpoint();
+        return AA;
+      }
+    }
+
+    // If this is queried in the manifest stage, we force the AA to indicate
+    // pessimistic fixpoint immediately.
+    if (Phase == AttributorPhase::MANIFEST) {
+      AA.getState().indicatePessimisticFixpoint();
+      return AA;
+    }
+
+    // Allow seeded attributes to declare dependencies.
+    // Remember the seeding state.
+    AttributorPhase OldPhase = Phase;
+    Phase = AttributorPhase::UPDATE;
+
+    updateAA(AA);
+
+    Phase = OldPhase;
+
+    if (TrackDependence && AA.getState().isValidState())
+      recordDependence(AA, const_cast<AbstractAttribute &>(*QueryingAA),
+                       DepClass);
+    return AA;
   }
 
+  /// Return the attribute of \p AAType for \p IRP if existing. This also
+  /// allows lookups by non-AA users.
+  template <typename AAType>
+  AAType *lookupAAFor(const IRPosition &IRP,
+                      const AbstractAttribute *QueryingAA = nullptr,
+                      bool TrackDependence = false,
+                      DepClassTy DepClass = DepClassTy::OPTIONAL) {
+    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
+                  "Cannot query an attribute with a type not derived from "
+                  "'AbstractAttribute'!");
+    assert((QueryingAA || !TrackDependence) &&
+           "Cannot track dependences without a QueryingAA!");
+
+    // Lookup the abstract attribute of type AAType. If found, return it after
+    // registering a dependence of QueryingAA on the one returned attribute.
+    AbstractAttribute *AAPtr = AAMap.lookup({&AAType::ID, IRP});
+    if (!AAPtr)
+      return nullptr;
+
+    AAType *AA = static_cast<AAType *>(AAPtr);
+
+    // Do not register a dependence on an attribute with an invalid state.
+    if (TrackDependence && AA->getState().isValidState())
+      recordDependence(*AA, const_cast<AbstractAttribute &>(*QueryingAA),
+                       DepClass);
+    return AA;
+  }
+
+  /// Explicitly record a dependence from \p FromAA to \p ToAA, that is if
+  /// \p FromAA changes \p ToAA should be updated as well.
+  ///
+  /// This method should be used in conjunction with the `getAAFor` method and
+  /// with the TrackDependence flag passed to the method set to false. This can
+  /// be beneficial to avoid false dependences but it requires the users of
+  /// `getAAFor` to explicitly record true dependences through this method.
+  /// The \p DepClass flag indicates if the dependence is strictly necessary.
+  /// That means for required dependences, if \p FromAA changes to an invalid
+  /// state, \p ToAA can be moved to a pessimistic fixpoint because it required
+  /// information from \p FromAA but none is available anymore.
+  void recordDependence(const AbstractAttribute &FromAA,
+                        const AbstractAttribute &ToAA, DepClassTy DepClass);
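+
+  // A usage sketch (not part of the interface): pair a non-tracking query
+  // with an explicit dependence record once the result is actually used.
+  // AAValueSimplify is one of the concrete attributes declared later in this
+  // header; the surrounding code is hypothetical:
+  //
+  //   const auto &SimplifyAA = A.getAAFor<AAValueSimplify>(
+  //       *this, IRP, /* TrackDependence */ false);
+  //   if (SimplifyAA.getState().isValidState())
+  //     A.recordDependence(SimplifyAA, *this, DepClassTy::REQUIRED);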
+
   /// Introduce a new abstract attribute into the fixpoint analysis.
   ///
   /// Note that ownership of the attribute is given to the Attributor. It will
   /// invoke delete on the attribute upon destruction of the Attributor.
   ///
-  /// Attributes are identified by
-  ///  (1) their anchored value (see AA.getAnchoredValue()),
-  ///  (2) their argument number (\p ArgNo, or Argument::getArgNo()), and
-  ///  (3) their default attribute kind (see AAType::ID).
-  template <typename AAType> AAType &registerAA(AAType &AA, int ArgNo = -1) {
+  /// Attributes are identified by their IR position (AAType::getIRPosition())
+  /// and the address of their static member (see AAType::ID).
+  template <typename AAType> AAType &registerAA(AAType &AA) {
     static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                   "Cannot register an attribute with a type not derived from "
                   "'AbstractAttribute'!");
-
-    // Determine the anchor value and the argument number which are used to
-    // lookup the attribute together with AAType::ID.
-    Value &AnchoredVal = AA.getAnchoredValue();
-    if (auto *Arg = dyn_cast<Argument>(&AnchoredVal))
-      ArgNo = Arg->getArgNo();
-
     // Put the attribute in the lookup map structure and the container we use to
     // keep track of all attributes.
-    AAMap[{&AnchoredVal, ArgNo}][AAType::ID] = &AA;
-    AllAbstractAttributes.push_back(&AA);
+    const IRPosition &IRP = AA.getIRPosition();
+    AbstractAttribute *&AAPtr = AAMap[{&AAType::ID, IRP}];
+
+    assert(!AAPtr && "Attribute already in map!");
+    AAPtr = &AA;
+
+    // Register AA with the synthetic root only before the manifest stage.
+    if (Phase == AttributorPhase::SEEDING || Phase == AttributorPhase::UPDATE)
+      DG.SyntheticRoot.Deps.push_back(
+          AADepGraphNode::DepTy(&AA, unsigned(DepClassTy::REQUIRED)));
+
     return AA;
   }
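+
+  // A usage sketch (not part of the interface): manually registering a
+  // custom abstract attribute. `AAMyCustom` and its constructor are
+  // hypothetical; concrete AAs are usually created via
+  // AAType::createForPosition:
+  //
+  //   auto &AA = *new (A.Allocator) AAMyCustom(IRPosition::function(F));
+  //   A.registerAA(AA);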
 
+  /// Return the internal information cache.
+  InformationCache &getInfoCache() { return InfoCache; }
+
+  /// Return true if this is a module pass, false otherwise.
+  bool isModulePass() const {
+    return !Functions.empty() &&
+           Functions.size() == Functions.front()->getParent()->size();
+  }
+
+  /// Return true if we derive attributes for \p Fn.
+  bool isRunOn(Function &Fn) const {
+    return Functions.empty() || Functions.count(&Fn);
+  }
+
   /// Determine opportunities to derive 'default' attributes in \p F and create
   /// abstract attribute objects for them.
   ///
   /// \param F The function that is checked for attribute opportunities.
-  /// \param InfoCache A cache for information queryable by the new attributes.
-  /// \param Whitelist If not null, a set limiting the attribute opportunities.
   ///
   /// Note that abstract attribute instances are generally created even if the
   /// IR already contains the information they would deduce. The most important
   /// reason for this is the single interface, the one of the abstract attribute
   /// instance, which can be queried without the need to look at the IR in
   /// various places.
-  void identifyDefaultAbstractAttributes(
-      Function &F, InformationCache &InfoCache,
-      DenseSet</* Attribute::AttrKind */ unsigned> *Whitelist = nullptr);
+  void identifyDefaultAbstractAttributes(Function &F);
 
-private:
-  /// The set of all abstract attributes.
-  ///{
-  using AAVector = SmallVector<AbstractAttribute *, 64>;
-  AAVector AllAbstractAttributes;
-  ///}
-
-  /// A nested map to lookup abstract attributes based on the anchored value and
-  /// an argument positions (or -1) on the outer level, and attribute kinds
-  /// (Attribute::AttrKind) on the inner level.
-  ///{
-  using KindToAbstractAttributeMap = DenseMap<unsigned, AbstractAttribute *>;
-  DenseMap<std::pair<const Value *, int>, KindToAbstractAttributeMap> AAMap;
-  ///}
-
-  /// A map from abstract attributes to the ones that queried them through calls
-  /// to the getAAFor<...>(...) method.
-  ///{
-  using QueryMapTy =
-      DenseMap<AbstractAttribute *, SetVector<AbstractAttribute *>>;
-  QueryMapTy QueryMap;
-  ///}
-};
-
-/// Data structure to hold cached (LLVM-IR) information.
-///
-/// All attributes are given an InformationCache object at creation time to
-/// avoid inspection of the IR by all of them individually. This default
-/// InformationCache will hold information required by 'default' attributes,
-/// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
-/// is called.
-///
-/// If custom abstract attributes, registered manually through
-/// Attributor::registerAA(...), need more information, especially if it is not
-/// reusable, it is advised to inherit from the InformationCache and cast the
-/// instance down in the abstract attributes.
-struct InformationCache {
-  /// A map type from opcodes to instructions with this opcode.
-  using OpcodeInstMapTy = DenseMap<unsigned, SmallVector<Instruction *, 32>>;
-
-  /// Return the map that relates "interesting" opcodes with all instructions
-  /// with that opcode in \p F.
-  OpcodeInstMapTy &getOpcodeInstMapForFunction(Function &F) {
-    return FuncInstOpcodeMap[&F];
+  /// Determine whether the function \p F is IPO amendable.
+  ///
+  /// If a function is exactly defined or it has an alwaysinline attribute
+  /// and is viable to be inlined, we say it is IPO amendable.
+  bool isFunctionIPOAmendable(const Function &F) {
+    return F.hasExactDefinition() || InfoCache.InlineableFunctions.count(&F);
   }
 
-  /// A vector type to hold instructions.
-  using InstructionVectorTy = std::vector<Instruction *>;
+  /// Mark the internal function \p F as live.
+  ///
+  /// This will trigger the identification and initialization of attributes for
+  /// \p F.
+  void markLiveInternalFunction(const Function &F) {
+    assert(F.hasLocalLinkage() &&
+           "Only local linkage is assumed dead initially.");
 
-  /// Return the instructions in \p F that may read or write memory.
-  InstructionVectorTy &getReadOrWriteInstsForFunction(Function &F) {
-    return FuncRWInstsMap[&F];
+    identifyDefaultAbstractAttributes(const_cast<Function &>(F));
   }
 
+  /// Helper function to remove a call site.
+  void removeCallSite(CallInst *CI) {
+    if (!CI)
+      return;
+
+    CGUpdater.removeCallSite(*CI);
+  }
+
+  /// Record that \p U is to be replaced with \p NV after information was
+  /// manifested. This also triggers deletion of trivially dead instructions.
+  bool changeUseAfterManifest(Use &U, Value &NV) {
+    Value *&V = ToBeChangedUses[&U];
+    if (V && (V->stripPointerCasts() == NV.stripPointerCasts() ||
+              isa_and_nonnull<UndefValue>(V)))
+      return false;
+    assert((!V || V == &NV || isa<UndefValue>(NV)) &&
+           "Use was registered twice for replacement with different values!");
+    V = &NV;
+    return true;
+  }
+
+  /// Helper function to replace all uses of \p V with \p NV. Return true if
+  /// there is any change. The flag \p ChangeDroppable indicates if droppable
+  /// uses should be changed too.
+  bool changeValueAfterManifest(Value &V, Value &NV,
+                                bool ChangeDroppable = true) {
+    bool Changed = false;
+    for (auto &U : V.uses())
+      if (ChangeDroppable || !U.getUser()->isDroppable())
+        Changed |= changeUseAfterManifest(U, NV);
+
+    return Changed;
+  }
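+
+  // A usage sketch (not part of the interface): a concrete AA's manifest()
+  // might replace the associated value with a deduced constant; `MyAA` and
+  // the way `C` is obtained are hypothetical:
+  //
+  //   ChangeStatus MyAA::manifest(Attributor &A) {
+  //     Constant *C = /* the constant deduced during the update phase */;
+  //     if (A.changeValueAfterManifest(getAssociatedValue(), *C))
+  //       return ChangeStatus::CHANGED;
+  //     return ChangeStatus::UNCHANGED;
+  //   }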
+
+  /// Record that \p I is to be replaced with `unreachable` after information
+  /// was manifested.
+  void changeToUnreachableAfterManifest(Instruction *I) {
+    ToBeChangedToUnreachableInsts.insert(I);
+  }
+
+  /// Record that \p II has at least one dead successor block. This information
+  /// is used, e.g., to replace \p II with a call, after information was
+  /// manifested.
+  void registerInvokeWithDeadSuccessor(InvokeInst &II) {
+    InvokeWithDeadSuccessor.push_back(&II);
+  }
+
+  /// Record that \p I is deleted after information was manifested. This also
+  /// triggers deletion of trivially dead instructions.
+  void deleteAfterManifest(Instruction &I) { ToBeDeletedInsts.insert(&I); }
+
+  /// Record that \p BB is deleted after information was manifested. This also
+  /// triggers deletion of trivially dead instructions.
+  void deleteAfterManifest(BasicBlock &BB) { ToBeDeletedBlocks.insert(&BB); }
+
+  /// Record that \p F is deleted after information was manifested.
+  void deleteAfterManifest(Function &F) { ToBeDeletedFunctions.insert(&F); }
+
+  /// If \p V is assumed to be a constant, return it; if it is unclear yet,
+  /// return None; otherwise return `nullptr`.
+  Optional<Constant *> getAssumedConstant(const Value &V,
+                                          const AbstractAttribute &AA,
+                                          bool &UsedAssumedInformation);
+
+  /// Return true if \p AA (or its context instruction) is assumed dead.
+  ///
+  /// If \p LivenessAA is not provided it is queried.
+  bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA,
+                     bool CheckBBLivenessOnly = false,
+                     DepClassTy DepClass = DepClassTy::OPTIONAL);
+
+  /// Return true if \p I is assumed dead.
+  ///
+  /// If \p LivenessAA is not provided it is queried.
+  bool isAssumedDead(const Instruction &I, const AbstractAttribute *QueryingAA,
+                     const AAIsDead *LivenessAA,
+                     bool CheckBBLivenessOnly = false,
+                     DepClassTy DepClass = DepClassTy::OPTIONAL);
+
+  /// Return true if \p U is assumed dead.
+  ///
+  /// If \p FnLivenessAA is not provided it is queried.
+  bool isAssumedDead(const Use &U, const AbstractAttribute *QueryingAA,
+                     const AAIsDead *FnLivenessAA,
+                     bool CheckBBLivenessOnly = false,
+                     DepClassTy DepClass = DepClassTy::OPTIONAL);
+
+  /// Return true if \p IRP is assumed dead.
+  ///
+  /// If \p FnLivenessAA is not provided it is queried.
+  bool isAssumedDead(const IRPosition &IRP, const AbstractAttribute *QueryingAA,
+                     const AAIsDead *FnLivenessAA,
+                     bool CheckBBLivenessOnly = false,
+                     DepClassTy DepClass = DepClassTy::OPTIONAL);
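+
+  // A usage sketch (not part of the interface): skip instructions that are
+  // assumed dead during an update; passing a null liveness AA lets the
+  // Attributor query it internally:
+  //
+  //   if (A.isAssumedDead(I, this, /* LivenessAA */ nullptr))
+  //     return true; // `I` can be ignored, it is assumed dead anyway.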
+
+  /// Check \p Pred on all (transitive) uses of \p V.
+  ///
+  /// This method will evaluate \p Pred on all (transitive) uses of the
+  /// associated value and return true if \p Pred holds every time.
+  bool checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
+                       const AbstractAttribute &QueryingAA, const Value &V,
+                       DepClassTy LivenessDepClass = DepClassTy::OPTIONAL);
+
+  /// Helper struct used in the communication between an abstract attribute (AA)
+  /// that wants to change the signature of a function and the Attributor which
+  /// applies the changes. The struct is partially initialized with the
+  /// information from the AA (see the constructor). All other members are
+  /// provided by the Attributor prior to invoking any callbacks.
+  struct ArgumentReplacementInfo {
+    /// Callee repair callback type
+    ///
+    /// The function repair callback is invoked once to rewire the replacement
+    /// arguments in the body of the new function. The argument replacement info
+    /// is passed, as built from the registerFunctionSignatureRewrite call, as
+    /// well as the replacement function and an iterator to the first
+    /// replacement argument.
+    using CalleeRepairCBTy = std::function<void(
+        const ArgumentReplacementInfo &, Function &, Function::arg_iterator)>;
+
+    /// Abstract call site (ACS) repair callback type
+    ///
+    /// The abstract call site repair callback is invoked once on every abstract
+    /// call site of the replaced function (\see ReplacedFn). The callback needs
+    /// to provide the operands for the call to the new replacement function.
+    /// The number and type of the operands appended to the provided vector
+    /// (second argument) is defined by the number and types determined through
+    /// the replacement type vector (\see ReplacementTypes). The first argument
+    /// is the ArgumentReplacementInfo object registered with the Attributor
+    /// through the registerFunctionSignatureRewrite call.
+    using ACSRepairCBTy =
+        std::function<void(const ArgumentReplacementInfo &, AbstractCallSite,
+                           SmallVectorImpl<Value *> &)>;
+
+    /// Simple getters, see the corresponding members for details.
+    ///{
+
+    Attributor &getAttributor() const { return A; }
+    const Function &getReplacedFn() const { return ReplacedFn; }
+    const Argument &getReplacedArg() const { return ReplacedArg; }
+    unsigned getNumReplacementArgs() const { return ReplacementTypes.size(); }
+    const SmallVectorImpl<Type *> &getReplacementTypes() const {
+      return ReplacementTypes;
+    }
+
+    ///}
+
+  private:
+    /// Constructor that takes the argument to be replaced, the types of
+    /// the replacement arguments, as well as callbacks to repair the call sites
+    /// and new function after the replacement happened.
+    ArgumentReplacementInfo(Attributor &A, Argument &Arg,
+                            ArrayRef<Type *> ReplacementTypes,
+                            CalleeRepairCBTy &&CalleeRepairCB,
+                            ACSRepairCBTy &&ACSRepairCB)
+        : A(A), ReplacedFn(*Arg.getParent()), ReplacedArg(Arg),
+          ReplacementTypes(ReplacementTypes.begin(), ReplacementTypes.end()),
+          CalleeRepairCB(std::move(CalleeRepairCB)),
+          ACSRepairCB(std::move(ACSRepairCB)) {}
+
+    /// Reference to the attributor to allow access from the callbacks.
+    Attributor &A;
+
+    /// The "old" function replaced by ReplacementFn.
+    const Function &ReplacedFn;
+
+    /// The "old" argument replaced by new ones defined via ReplacementTypes.
+    const Argument &ReplacedArg;
+
+    /// The types of the arguments replacing ReplacedArg.
+    const SmallVector<Type *, 8> ReplacementTypes;
+
+    /// Callee repair callback, see CalleeRepairCBTy.
+    const CalleeRepairCBTy CalleeRepairCB;
+
+    /// Abstract call site (ACS) repair callback, see ACSRepairCBTy.
+    const ACSRepairCBTy ACSRepairCB;
+
+    /// Allow access to the private members from the Attributor.
+    friend struct Attributor;
+  };
+
+  /// Check if we can rewrite a function signature.
+  ///
+  /// The argument \p Arg is replaced with new ones defined by the number,
+  /// order, and types in \p ReplacementTypes.
+  ///
+  /// \returns True if the replacement can be registered via
+  /// registerFunctionSignatureRewrite, false otherwise.
+  bool isValidFunctionSignatureRewrite(Argument &Arg,
+                                       ArrayRef<Type *> ReplacementTypes);
+
+  /// Register a rewrite for a function signature.
+  ///
+  /// The argument \p Arg is replaced with new ones defined by the number,
+  /// order, and types in \p ReplacementTypes. The rewiring at the call sites is
+  /// done through \p ACSRepairCB and at the callee site through
+  /// \p CalleeRepairCB.
+  ///
+  /// \returns True if the replacement was registered, false otherwise.
+  bool registerFunctionSignatureRewrite(
+      Argument &Arg, ArrayRef<Type *> ReplacementTypes,
+      ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
+      ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB);
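+
+  // A usage sketch (not part of the interface): replace the argument `Arg`
+  // by two new arguments; the replacement types and the callback bodies are
+  // hypothetical:
+  //
+  //   Type *ReplTys[] = {Int64Ty, Int64Ty};
+  //   auto CalleeRepairCB = [](const ArgumentReplacementInfo &ARI,
+  //                            Function &NewFn, Function::arg_iterator ArgIt) {
+  //     // Rewire uses of the old argument to the new ones starting at ArgIt.
+  //   };
+  //   auto ACSRepairCB = [](const ArgumentReplacementInfo &ARI,
+  //                         AbstractCallSite ACS,
+  //                         SmallVectorImpl<Value *> &NewArgOps) {
+  //     // Append exactly ARI.getNumReplacementArgs() call operands.
+  //   };
+  //   if (A.isValidFunctionSignatureRewrite(Arg, ReplTys))
+  //     A.registerFunctionSignatureRewrite(Arg, ReplTys,
+  //                                        std::move(CalleeRepairCB),
+  //                                        std::move(ACSRepairCB));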
+
+  /// Check \p Pred on all function call sites.
+  ///
+  /// This method will evaluate \p Pred on call sites and return
+  /// true if \p Pred holds for every call site. However, this is only possible
+  /// if all call sites are known, hence the function has internal linkage.
+  /// If true is returned, \p AllCallSitesKnown is set if all possible call
+  /// sites of the function have been visited.
+  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
+                            const AbstractAttribute &QueryingAA,
+                            bool RequireAllCallSites, bool &AllCallSitesKnown);
+
+  /// Check \p Pred on all values potentially returned by \p F.
+  ///
+  /// This method will evaluate \p Pred on all values potentially returned by
+  /// the function associated with \p QueryingAA. The returned values are
+  /// matched with their respective return instructions. Returns true if \p Pred
+  /// holds on all of them.
+  bool checkForAllReturnedValuesAndReturnInsts(
+      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
+      const AbstractAttribute &QueryingAA);
+
+  /// Check \p Pred on all values potentially returned by the function
+  /// associated with \p QueryingAA.
+  ///
+  /// This is the context insensitive version of the method above.
+  bool checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
+                                 const AbstractAttribute &QueryingAA);
+
+  /// Check \p Pred on all instructions with an opcode present in \p Opcodes.
+  ///
+  /// This method will evaluate \p Pred on all instructions with an opcode
+  /// present in \p Opcodes and return true if \p Pred holds on all of them.
+  bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
+                               const AbstractAttribute &QueryingAA,
+                               const ArrayRef<unsigned> &Opcodes,
+                               bool CheckBBLivenessOnly = false);
+
+  /// Check \p Pred on all call-like instructions (=CallBased derived).
+  ///
+  /// See checkForAllInstructions(...) for more information.
+  bool checkForAllCallLikeInstructions(function_ref<bool(Instruction &)> Pred,
+                                       const AbstractAttribute &QueryingAA) {
+    return checkForAllInstructions(Pred, QueryingAA,
+                                   {(unsigned)Instruction::Invoke,
+                                    (unsigned)Instruction::CallBr,
+                                    (unsigned)Instruction::Call});
+  }
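+
+  // A usage sketch (not part of the interface): require a property of every
+  // call-like instruction in the scope of the querying AA; the surrounding
+  // code is hypothetical:
+  //
+  //   auto CheckForNoUnwind = [&](Instruction &I) {
+  //     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
+  //         *this, IRPosition::callsite_function(cast<CallBase>(I)));
+  //     return NoUnwindAA.isAssumedNoUnwind();
+  //   };
+  //   if (!A.checkForAllCallLikeInstructions(CheckForNoUnwind, *this))
+  //     return indicatePessimisticFixpoint();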
+
+  /// Check \p Pred on all Read/Write instructions.
+  ///
+  /// This method will evaluate \p Pred on all instructions that read or write
+  /// to memory present in the information cache and return true if \p Pred
+  /// holds on all of them.
+  bool checkForAllReadWriteInstructions(function_ref<bool(Instruction &)> Pred,
+                                        AbstractAttribute &QueryingAA);
+
+  /// Create a shallow wrapper for \p F such that \p F has internal linkage
+  /// afterwards. It also sets the original \p F's name to an anonymous one.
+  ///
+  /// A wrapper is a function with the same type (and attributes) as \p F
+  /// that will only call \p F and return the result, if any.
+  ///
+  /// Assuming the declaration of \p F looks like:
+  ///   rty F(aty0 arg0, ..., atyN argN);
+  ///
+  /// The wrapper will then look as follows:
+  ///   rty wrapper(aty0 arg0, ..., atyN argN) {
+  ///     return F(arg0, ..., argN);
+  ///   }
+  ///
+  static void createShallowWrapper(Function &F);
+
+  /// Return the data layout associated with the anchor scope.
+  const DataLayout &getDataLayout() const { return InfoCache.DL; }
+
+  /// The allocator used to allocate memory, e.g. for `AbstractAttribute`s.
+  BumpPtrAllocator &Allocator;
+
 private:
-  /// A map type from functions to opcode to instruction maps.
-  using FuncInstOpcodeMapTy = DenseMap<Function *, OpcodeInstMapTy>;
+  /// This method will do fixpoint iteration until a fixpoint or the
+  /// maximum iteration count is reached.
+  ///
+  /// If the maximum iteration count is reached, this method will
+  /// indicate a pessimistic fixpoint on attributes that transitively depend
+  /// on attributes that were scheduled for an update.
+  void runTillFixpoint();
 
-  /// A map type from functions to their read or write instructions.
-  using FuncRWInstsMapTy = DenseMap<Function *, InstructionVectorTy>;
+  /// Gets called after scheduling, manifests attributes to the LLVM IR.
+  ChangeStatus manifestAttributes();
 
-  /// A nested map that remembers all instructions in a function with a certain
-  /// instruction opcode (Instruction::getOpcode()).
-  FuncInstOpcodeMapTy FuncInstOpcodeMap;
+  /// Gets called after attributes have been manifested, cleans up the IR.
+  /// Deletes dead functions, blocks and instructions.
+  /// Rewrites function signatures and updates the call graph.
+  ChangeStatus cleanupIR();
 
-  /// A map from functions to their instructions that may read or write memory.
-  FuncRWInstsMapTy FuncRWInstsMap;
+  /// Identify internal functions that are effectively dead, thus not reachable
+  /// from a live entry point. The functions are added to ToBeDeletedFunctions.
+  void identifyDeadInternalFunctions();
 
-  /// Give the Attributor access to the members so
-  /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
-  friend struct Attributor;
+  /// Run `::update` on \p AA and track the dependences queried while doing so.
+  /// Also adjust the state if we know further updates are not necessary.
+  ChangeStatus updateAA(AbstractAttribute &AA);
+
+  /// Remember the dependences on the top of the dependence stack such that they
+  /// may trigger further updates. (\see DependenceStack)
+  void rememberDependences();
+
+  /// Check \p Pred on all call sites of \p Fn.
+  ///
+  /// This method will evaluate \p Pred on call sites and return
+  /// true if \p Pred holds for every call site. However, this is only possible
+  /// if all call sites are known, hence the function has internal linkage.
+  /// If true is returned, \p AllCallSitesKnown is set if all possible call
+  /// sites of the function have been visited.
+  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
+                            const Function &Fn, bool RequireAllCallSites,
+                            const AbstractAttribute *QueryingAA,
+                            bool &AllCallSitesKnown);
+
+  /// Apply all requested function signature rewrites
+  /// (\see registerFunctionSignatureRewrite) and return CHANGED if the module
+  /// was altered.
+  ChangeStatus
+  rewriteFunctionSignatures(SmallPtrSetImpl<Function *> &ModifiedFns);
+
+  /// Check if the Attribute \p AA should be seeded.
+  /// See getOrCreateAAFor.
+  bool shouldSeedAttribute(AbstractAttribute &AA);
+
+  /// A map to lookup abstract attributes keyed by the IR position they are
+  /// anchored at and the address of the static member that identifies the
+  /// attribute kind (AAType::ID).
+  ///{
+  using AAMapKeyTy = std::pair<const char *, IRPosition>;
+  DenseMap<AAMapKeyTy, AbstractAttribute *> AAMap;
+  ///}
+
+  /// Map to remember all requested signature changes (= argument replacements).
+  DenseMap<Function *, SmallVector<std::unique_ptr<ArgumentReplacementInfo>, 8>>
+      ArgumentReplacementMap;
+
+  /// The set of functions we are deriving attributes for.
+  SetVector<Function *> &Functions;
+
+  /// The information cache that holds pre-processed (LLVM-IR) information.
+  InformationCache &InfoCache;
+
+  /// Helper to update an underlying call graph.
+  CallGraphUpdater &CGUpdater;
+
+  /// Abstract Attribute dependency graph
+  AADepGraph DG;
+
+  /// Set of functions for which we modified the content such that it might
+  /// impact the call graph.
+  SmallPtrSet<Function *, 8> CGModifiedFunctions;
+
+  /// Information about a dependence. If FromAA is changed, ToAA needs to be
+  /// updated as well.
+  struct DepInfo {
+    const AbstractAttribute *FromAA;
+    const AbstractAttribute *ToAA;
+    DepClassTy DepClass;
+  };
+
+  /// The dependence stack is used to track dependences during an
+  /// `AbstractAttribute::update` call. As `AbstractAttribute::update` can be
+  /// recursive we might have multiple vectors of dependences in here. The stack
+  /// size should be adjusted according to the expected recursion depth and the
+  /// inner dependence vector size to the expected number of dependences per
+  /// abstract attribute. Since the inner vectors are actually allocated on the
+  /// stack we can be generous with their size.
+  using DependenceVector = SmallVector<DepInfo, 8>;
+  SmallVector<DependenceVector *, 16> DependenceStack;
+
+  /// If not null, a set limiting the attribute opportunities.
+  const DenseSet<const char *> *Allowed;
+
+  /// A set to remember the functions we already assume to be live and visited.
+  DenseSet<const Function *> VisitedFunctions;
+
+  /// Uses we replace with a new value after manifest is done. We will then
+  /// remove trivially dead instructions as well.
+  DenseMap<Use *, Value *> ToBeChangedUses;
+
+  /// Instructions we replace with `unreachable` insts after manifest is done.
+  SmallDenseSet<WeakVH, 16> ToBeChangedToUnreachableInsts;
+
+  /// Invoke instructions with at least a single dead successor block.
+  SmallVector<WeakVH, 16> InvokeWithDeadSuccessor;
+
+  /// A flag that indicates which stage of the process we are in. Initially, the
+  /// phase is SEEDING. The phase is changed in `Attributor::run()`.
+  enum class AttributorPhase {
+    SEEDING,
+    UPDATE,
+    MANIFEST,
+    CLEANUP,
+  } Phase = AttributorPhase::SEEDING;
+
+  /// The current initialization chain length. Tracked to avoid stack overflows.
+  unsigned InitializationChainLength = 0;
+
+  /// Functions, blocks, and instructions we delete after manifest is done.
+  ///
+  ///{
+  SmallPtrSet<Function *, 8> ToBeDeletedFunctions;
+  SmallPtrSet<BasicBlock *, 8> ToBeDeletedBlocks;
+  SmallDenseSet<WeakVH, 8> ToBeDeletedInsts;
+  ///}
+
+  friend AADepGraph;
 };
 
 /// An interface to query the internal state of an abstract attribute.
@@ -339,9 +1705,10 @@
 ///
 /// All methods need to be implemented by the subclass. For the common use case,
 /// a single boolean state or a bit-encoded state, the BooleanState and
-/// IntegerState classes are already provided. An abstract attribute can inherit
-/// from them to get the abstract state interface and additional methods to
-/// directly modify the state based if needed. See the class comments for help.
+/// {Inc,Dec,Bit}IntegerState classes are already provided. An abstract
+/// attribute can inherit from them to get the abstract state interface and
+/// additional methods to directly modify the state if needed. See the
+/// class comments for help.
 struct AbstractState {
   virtual ~AbstractState() {}
 
@@ -357,13 +1724,17 @@
   ///
   /// This will usually make the optimistically assumed state the known to be
   /// true state.
-  virtual void indicateOptimisticFixpoint() = 0;
+  ///
+  /// \returns ChangeStatus::UNCHANGED as the assumed value should not change.
+  virtual ChangeStatus indicateOptimisticFixpoint() = 0;
 
   /// Indicate that the abstract state should converge to the pessimistic state.
   ///
   /// This will usually revert the optimistically assumed state to the known to
   /// be true state.
-  virtual void indicatePessimisticFixpoint() = 0;
+  ///
+  /// \returns ChangeStatus::CHANGED as the assumed value may change.
+  virtual ChangeStatus indicatePessimisticFixpoint() = 0;
 };
 
 /// Simple state with integers encoding.
@@ -376,15 +1747,24 @@
 /// force/indicate a fixpoint. If an optimistic one is indicated, the known
 /// state will catch up with the assumed one, for a pessimistic fixpoint it is
 /// the other way around.
-struct IntegerState : public AbstractState {
-  /// Undrlying integer type, we assume 32 bits to be enough.
-  using base_t = uint32_t;
+template <typename base_ty, base_ty BestState, base_ty WorstState>
+struct IntegerStateBase : public AbstractState {
+  using base_t = base_ty;
 
-  /// Initialize the (best) state.
-  IntegerState(base_t BestState = ~0) : Assumed(BestState) {}
+  IntegerStateBase() {}
+  IntegerStateBase(base_t Assumed) : Assumed(Assumed) {}
+
+  /// Return the best possible representable state.
+  static constexpr base_t getBestState() { return BestState; }
+  static constexpr base_t getBestState(const IntegerStateBase &) {
+    return getBestState();
+  }
 
   /// Return the worst possible representable state.
-  static constexpr base_t getWorstState() { return 0; }
+  static constexpr base_t getWorstState() { return WorstState; }
+  static constexpr base_t getWorstState(const IntegerStateBase &) {
+    return getWorstState();
+  }
 
   /// See AbstractState::isValidState()
   /// NOTE: For now we simply pretend that the worst possible state is invalid.
@@ -394,10 +1774,16 @@
   bool isAtFixpoint() const override { return Assumed == Known; }
 
   /// See AbstractState::indicateOptimisticFixpoint(...)
-  void indicateOptimisticFixpoint() override { Known = Assumed; }
+  ChangeStatus indicateOptimisticFixpoint() override {
+    Known = Assumed;
+    return ChangeStatus::UNCHANGED;
+  }
 
   /// See AbstractState::indicatePessimisticFixpoint(...)
-  void indicatePessimisticFixpoint() override { Assumed = Known; }
+  ChangeStatus indicatePessimisticFixpoint() override {
+    Assumed = Known;
+    return ChangeStatus::CHANGED;
+  }
 
   /// Return the known state encoding
   base_t getKnown() const { return Known; }
@@ -405,49 +1791,432 @@
   /// Return the assumed state encoding.
   base_t getAssumed() const { return Assumed; }
 
-  /// Return true if the bits set in \p BitsEncoding are "known bits".
-  bool isKnown(base_t BitsEncoding) const {
-    return (Known & BitsEncoding) == BitsEncoding;
+  /// Equality for IntegerStateBase.
+  bool
+  operator==(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
+    return this->getAssumed() == R.getAssumed() &&
+           this->getKnown() == R.getKnown();
   }
 
-  /// Return true if the bits set in \p BitsEncoding are "assumed bits".
-  bool isAssumed(base_t BitsEncoding) const {
-    return (Assumed & BitsEncoding) == BitsEncoding;
+  /// Inequality for IntegerStateBase.
+  bool
+  operator!=(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
+    return !(*this == R);
   }
 
-  /// Add the bits in \p BitsEncoding to the "known bits".
-  IntegerState &addKnownBits(base_t Bits) {
-    // Make sure we never miss any "known bits".
-    Assumed |= Bits;
-    Known |= Bits;
-    return *this;
+  /// "Clamp" this state with \p R. The result is subtype dependent but it is
+  /// intended that only information assumed in both states will be assumed in
+  /// this one afterwards.
+  void operator^=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
+    handleNewAssumedValue(R.getAssumed());
   }
 
-  /// Remove the bits in \p BitsEncoding from the "assumed bits" if not known.
-  IntegerState &removeAssumedBits(base_t BitsEncoding) {
-    // Make sure we never loose any "known bits".
-    Assumed = (Assumed & ~BitsEncoding) | Known;
-    return *this;
+  /// "Clamp" this state with \p R. The result is subtype dependent but it is
+  /// intended that information known in either state will be known in
+  /// this one afterwards.
+  void operator+=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
+    handleNewKnownValue(R.getKnown());
   }
 
-  /// Keep only "assumed bits" also set in \p BitsEncoding but all known ones.
-  IntegerState &intersectAssumedBits(base_t BitsEncoding) {
-    // Make sure we never loose any "known bits".
-    Assumed = (Assumed & BitsEncoding) | Known;
-    return *this;
+  void operator|=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
+    joinOR(R.getAssumed(), R.getKnown());
   }
 
-private:
+  void operator&=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
+    joinAND(R.getAssumed(), R.getKnown());
+  }
+
+protected:
+  /// Handle a new assumed value \p Value. Subtype dependent.
+  virtual void handleNewAssumedValue(base_t Value) = 0;
+
+  /// Handle a new known value \p Value. Subtype dependent.
+  virtual void handleNewKnownValue(base_t Value) = 0;
+
+  /// Join \p AssumedValue and \p KnownValue into the current state via an OR
+  /// combination. Subtype dependent.
+  virtual void joinOR(base_t AssumedValue, base_t KnownValue) = 0;
+
+  /// Join \p AssumedValue and \p KnownValue into the current state via an AND
+  /// combination. Subtype dependent.
+  virtual void joinAND(base_t AssumedValue, base_t KnownValue) = 0;
+
   /// The known state encoding in an integer of type base_t.
   base_t Known = getWorstState();
 
   /// The assumed state encoding in an integer of type base_t.
-  base_t Assumed;
+  base_t Assumed = getBestState();
+};
+
+/// Specialization of the integer state for a bit-wise encoding.
+template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
+          base_ty WorstState = 0>
+struct BitIntegerState
+    : public IntegerStateBase<base_ty, BestState, WorstState> {
+  using base_t = base_ty;
+
+  /// Return true if the bits set in \p BitsEncoding are "known bits".
+  bool isKnown(base_t BitsEncoding) const {
+    return (this->Known & BitsEncoding) == BitsEncoding;
+  }
+
+  /// Return true if the bits set in \p BitsEncoding are "assumed bits".
+  bool isAssumed(base_t BitsEncoding) const {
+    return (this->Assumed & BitsEncoding) == BitsEncoding;
+  }
+
+  /// Add the bits in \p BitsEncoding to the "known bits".
+  BitIntegerState &addKnownBits(base_t Bits) {
+    // Make sure we never miss any "known bits".
+    this->Assumed |= Bits;
+    this->Known |= Bits;
+    return *this;
+  }
+
+  /// Remove the bits in \p BitsEncoding from the "assumed bits" if not known.
+  BitIntegerState &removeAssumedBits(base_t BitsEncoding) {
+    return intersectAssumedBits(~BitsEncoding);
+  }
+
+  /// Remove the bits in \p BitsEncoding from the "known bits".
+  BitIntegerState &removeKnownBits(base_t BitsEncoding) {
+    this->Known = (this->Known & ~BitsEncoding);
+    return *this;
+  }
+
+  /// Keep only "assumed bits" also set in \p BitsEncoding but all known ones.
+  BitIntegerState &intersectAssumedBits(base_t BitsEncoding) {
+    // Make sure we never lose any "known bits".
+    this->Assumed = (this->Assumed & BitsEncoding) | this->Known;
+    return *this;
+  }
+
+private:
+  void handleNewAssumedValue(base_t Value) override {
+    intersectAssumedBits(Value);
+  }
+  void handleNewKnownValue(base_t Value) override { addKnownBits(Value); }
+  void joinOR(base_t AssumedValue, base_t KnownValue) override {
+    this->Known |= KnownValue;
+    this->Assumed |= AssumedValue;
+  }
+  void joinAND(base_t AssumedValue, base_t KnownValue) override {
+    this->Known &= KnownValue;
+    this->Assumed &= AssumedValue;
+  }
+};
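+
+// A usage sketch (not part of the interface): tracking two hypothetical
+// property bits with a bit-wise state.
+//
+//   enum { NO_READS = 1 << 0, NO_WRITES = 1 << 1 };
+//   BitIntegerState<uint8_t, NO_READS | NO_WRITES, 0> S;
+//   S.addKnownBits(NO_READS);       // Known, and thus also assumed.
+//   S.removeAssumedBits(NO_WRITES); // Give up the optimistic assumption.
+//   bool StillNoReads = S.isAssumed(NO_READS); // true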
+
+/// Specialization of the integer state for an increasing value, hence ~0u is
+/// the best state and 0 the worst.
+template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
+          base_ty WorstState = 0>
+struct IncIntegerState
+    : public IntegerStateBase<base_ty, BestState, WorstState> {
+  using super = IntegerStateBase<base_ty, BestState, WorstState>;
+  using base_t = base_ty;
+
+  IncIntegerState() : super() {}
+  IncIntegerState(base_t Assumed) : super(Assumed) {}
+
+  /// Return the best possible representable state.
+  static constexpr base_t getBestState() { return BestState; }
+  static constexpr base_t
+  getBestState(const IncIntegerState<base_ty, BestState, WorstState> &) {
+    return getBestState();
+  }
+
+  /// Take minimum of assumed and \p Value.
+  IncIntegerState &takeAssumedMinimum(base_t Value) {
+    // Make sure we never lose the "known value".
+    this->Assumed = std::max(std::min(this->Assumed, Value), this->Known);
+    return *this;
+  }
+
+  /// Take maximum of known and \p Value.
+  IncIntegerState &takeKnownMaximum(base_t Value) {
+    // Make sure we never lose the "known value".
+    this->Assumed = std::max(Value, this->Assumed);
+    this->Known = std::max(Value, this->Known);
+    return *this;
+  }
+
+private:
+  void handleNewAssumedValue(base_t Value) override {
+    takeAssumedMinimum(Value);
+  }
+  void handleNewKnownValue(base_t Value) override { takeKnownMaximum(Value); }
+  void joinOR(base_t AssumedValue, base_t KnownValue) override {
+    this->Known = std::max(this->Known, KnownValue);
+    this->Assumed = std::max(this->Assumed, AssumedValue);
+  }
+  void joinAND(base_t AssumedValue, base_t KnownValue) override {
+    this->Known = std::min(this->Known, KnownValue);
+    this->Assumed = std::min(this->Assumed, AssumedValue);
+  }
+};
+
+/// Specialization of the integer state for a decreasing value, hence 0 is the
+/// best state and ~0u the worst.
+template <typename base_ty = uint32_t>
+struct DecIntegerState : public IntegerStateBase<base_ty, 0, ~base_ty(0)> {
+  using base_t = base_ty;
+
+  /// Take maximum of assumed and \p Value.
+  DecIntegerState &takeAssumedMaximum(base_t Value) {
+    // Make sure we never lose the "known value".
+    this->Assumed = std::min(std::max(this->Assumed, Value), this->Known);
+    return *this;
+  }
+
+  /// Take minimum of known and \p Value.
+  DecIntegerState &takeKnownMinimum(base_t Value) {
+    // Make sure we never lose the "known value".
+    this->Assumed = std::min(Value, this->Assumed);
+    this->Known = std::min(Value, this->Known);
+    return *this;
+  }
+
+private:
+  void handleNewAssumedValue(base_t Value) override {
+    takeAssumedMaximum(Value);
+  }
+  void handleNewKnownValue(base_t Value) override { takeKnownMinimum(Value); }
+  void joinOR(base_t AssumedValue, base_t KnownValue) override {
+    this->Assumed = std::min(this->Assumed, KnownValue);
+    this->Assumed = std::min(this->Assumed, AssumedValue);
+  }
+  void joinAND(base_t AssumedValue, base_t KnownValue) override {
+    this->Assumed = std::max(this->Assumed, KnownValue);
+    this->Assumed = std::max(this->Assumed, AssumedValue);
+  }
 };
 
 /// Simple wrapper for a single bit (boolean) state.
-struct BooleanState : public IntegerState {
-  BooleanState() : IntegerState(1){};
+struct BooleanState : public IntegerStateBase<bool, 1, 0> {
+  using super = IntegerStateBase<bool, 1, 0>;
+  using base_t = IntegerStateBase::base_t;
+
+  BooleanState() : super() {}
+  BooleanState(base_t Assumed) : super(Assumed) {}
+
+  /// Set the assumed value to \p Value but never below the known one.
+  void setAssumed(bool Value) { Assumed &= (Known | Value); }
+
+  /// Set the known and assumed value to \p Value.
+  void setKnown(bool Value) {
+    Known |= Value;
+    Assumed |= Value;
+  }
+
+  /// Return true if the state is assumed to hold.
+  bool isAssumed() const { return getAssumed(); }
+
+  /// Return true if the state is known to hold.
+  bool isKnown() const { return getKnown(); }
+
+private:
+  void handleNewAssumedValue(base_t Value) override {
+    if (!Value)
+      Assumed = Known;
+  }
+  void handleNewKnownValue(base_t Value) override {
+    if (Value)
+      Known = (Assumed = Value);
+  }
+  void joinOR(base_t AssumedValue, base_t KnownValue) override {
+    Known |= KnownValue;
+    Assumed |= AssumedValue;
+  }
+  void joinAND(base_t AssumedValue, base_t KnownValue) override {
+    Known &= KnownValue;
+    Assumed &= AssumedValue;
+  }
+};
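+
+// A usage sketch (not part of the interface): the boolean state starts out
+// optimistic (assumed) and pessimistic (known).
+//
+//   BooleanState S;         // Assumed true, known false.
+//   S.setAssumed(false);    // Drop the optimistic assumption.
+//   bool B = S.isAssumed(); // false, since nothing was known.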
+
+/// State for an integer range.
+struct IntegerRangeState : public AbstractState {
+
+  /// Bitwidth of the associated value.
+  uint32_t BitWidth;
+
+  /// State representing assumed range, initially set to empty.
+  ConstantRange Assumed;
+
+  /// State representing known range, initially set to [-inf, inf].
+  ConstantRange Known;
+
+  IntegerRangeState(uint32_t BitWidth)
+      : BitWidth(BitWidth), Assumed(ConstantRange::getEmpty(BitWidth)),
+        Known(ConstantRange::getFull(BitWidth)) {}
+
+  IntegerRangeState(const ConstantRange &CR)
+      : BitWidth(CR.getBitWidth()), Assumed(CR),
+        Known(getWorstState(CR.getBitWidth())) {}
+
+  /// Return the worst possible representable state.
+  static ConstantRange getWorstState(uint32_t BitWidth) {
+    return ConstantRange::getFull(BitWidth);
+  }
+
+  /// Return the best possible representable state.
+  static ConstantRange getBestState(uint32_t BitWidth) {
+    return ConstantRange::getEmpty(BitWidth);
+  }
+  static ConstantRange getBestState(const IntegerRangeState &IRS) {
+    return getBestState(IRS.getBitWidth());
+  }
+
+  /// Return associated values' bit width.
+  uint32_t getBitWidth() const { return BitWidth; }
+
+  /// See AbstractState::isValidState()
+  bool isValidState() const override {
+    return BitWidth > 0 && !Assumed.isFullSet();
+  }
+
+  /// See AbstractState::isAtFixpoint()
+  bool isAtFixpoint() const override { return Assumed == Known; }
+
+  /// See AbstractState::indicateOptimisticFixpoint(...)
+  ChangeStatus indicateOptimisticFixpoint() override {
+    Known = Assumed;
+    return ChangeStatus::CHANGED;
+  }
+
+  /// See AbstractState::indicatePessimisticFixpoint(...)
+  ChangeStatus indicatePessimisticFixpoint() override {
+    Assumed = Known;
+    return ChangeStatus::CHANGED;
+  }
+
+  /// Return the known state encoding
+  ConstantRange getKnown() const { return Known; }
+
+  /// Return the assumed state encoding.
+  ConstantRange getAssumed() const { return Assumed; }
+
+  /// Unite assumed range with the passed state.
+  void unionAssumed(const ConstantRange &R) {
+    // Don't loose a known range.
+    Assumed = Assumed.unionWith(R).intersectWith(Known);
+  }
+
+  /// See IntegerRangeState::unionAssumed(..).
+  void unionAssumed(const IntegerRangeState &R) {
+    unionAssumed(R.getAssumed());
+  }
+
+  /// Unite known range with the passed state.
+  void unionKnown(const ConstantRange &R) {
+    // Don't lose a known range.
+    Known = Known.unionWith(R);
+    Assumed = Assumed.unionWith(Known);
+  }
+
+  /// See IntegerRangeState::unionKnown(..).
+  void unionKnown(const IntegerRangeState &R) { unionKnown(R.getKnown()); }
+
+  /// Intersect known range with the passed state.
+  void intersectKnown(const ConstantRange &R) {
+    Assumed = Assumed.intersectWith(R);
+    Known = Known.intersectWith(R);
+  }
+
+  /// See IntegerRangeState::intersectKnown(..).
+  void intersectKnown(const IntegerRangeState &R) {
+    intersectKnown(R.getKnown());
+  }
+
+  /// Equality for IntegerRangeState.
+  bool operator==(const IntegerRangeState &R) const {
+    return getAssumed() == R.getAssumed() && getKnown() == R.getKnown();
+  }
+
+  /// "Clamp" this state with \p R. The result is subtype dependent but it is
+  /// intended that only information assumed in both states will be assumed in
+  /// this one afterwards.
+  IntegerRangeState operator^=(const IntegerRangeState &R) {
+    // NOTE: The `^=` operator looks like an `intersect` but in this case we
+    // need to take the `union`.
+    unionAssumed(R);
+    return *this;
+  }
+
+  IntegerRangeState operator&=(const IntegerRangeState &R) {
+    // NOTE: The `&=` operator looks like an `intersect` but in this case we
+    // need to take the `union`.
+    unionKnown(R);
+    unionAssumed(R);
+    return *this;
+  }
+};
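+
+// A usage sketch (not part of the interface): accumulating the possible
+// values of a 32-bit integer.
+//
+//   IntegerRangeState S(32);                      // Assumed range is empty.
+//   S.unionAssumed(ConstantRange(APInt(32, 42))); // Assumed is now {42}.
+//   if (S.isValidState())
+//     ConstantRange R = S.getAssumed();
+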
+/// Helper struct necessary as the modular build fails if the virtual method
+/// IRAttribute::manifest is defined in Attributor.cpp.
+struct IRAttributeManifest {
+  static ChangeStatus manifestAttrs(Attributor &A, const IRPosition &IRP,
+                                    const ArrayRef<Attribute> &DeducedAttrs);
+};
+
+/// Helper to tie an abstract state implementation to an abstract attribute.
+template <typename StateTy, typename BaseType, class... Ts>
+struct StateWrapper : public BaseType, public StateTy {
+  /// Provide static access to the type of the state.
+  using StateType = StateTy;
+
+  StateWrapper(const IRPosition &IRP, Ts... Args)
+      : BaseType(IRP), StateTy(Args...) {}
+
+  /// See AbstractAttribute::getState(...).
+  StateType &getState() override { return *this; }
+
+  /// See AbstractAttribute::getState(...).
+  const StateType &getState() const override { return *this; }
+};
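+
+// A usage sketch (not part of the interface): tying a BooleanState to an
+// abstract attribute; `AAMyProperty` is hypothetical:
+//
+//   struct AAMyProperty
+//       : public StateWrapper<BooleanState, AbstractAttribute> {
+//     using Base = StateWrapper<BooleanState, AbstractAttribute>;
+//     AAMyProperty(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+//     ...
+//   };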
+
+/// Helper class that provides common functionality to manifest IR attributes.
+template <Attribute::AttrKind AK, typename BaseType>
+struct IRAttribute : public BaseType {
+  IRAttribute(const IRPosition &IRP) : BaseType(IRP) {}
+
+  /// See AbstractAttribute::initialize(...).
+  virtual void initialize(Attributor &A) override {
+    const IRPosition &IRP = this->getIRPosition();
+    if (isa<UndefValue>(IRP.getAssociatedValue()) ||
+        this->hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ false,
+                      &A)) {
+      this->getState().indicateOptimisticFixpoint();
+      return;
+    }
+
+    bool IsFnInterface = IRP.isFnInterfaceKind();
+    const Function *FnScope = IRP.getAnchorScope();
+    // TODO: Not all attributes require an exact definition. Find a way to
+    //       enable deduction for some but not all attributes in case the
+    //       definition might be changed at runtime, see also
+    //       http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
+    // TODO: We could always determine abstract attributes and if sufficient
+    //       information was found we could duplicate the functions that do not
+    //       have an exact definition.
+    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope)))
+      this->getState().indicatePessimisticFixpoint();
+  }
+
+  /// See AbstractAttribute::manifest(...).
+  ChangeStatus manifest(Attributor &A) override {
+    if (isa<UndefValue>(this->getIRPosition().getAssociatedValue()))
+      return ChangeStatus::UNCHANGED;
+    SmallVector<Attribute, 4> DeducedAttrs;
+    getDeducedAttributes(this->getAnchorValue().getContext(), DeducedAttrs);
+    return IRAttributeManifest::manifestAttrs(A, this->getIRPosition(),
+                                              DeducedAttrs);
+  }
+
+  /// Return the kind that identifies the abstract attribute implementation.
+  Attribute::AttrKind getAttrKind() const { return AK; }
+
+  /// Return the deduced attributes in \p Attrs.
+  virtual void getDeducedAttributes(LLVMContext &Ctx,
+                                    SmallVectorImpl<Attribute> &Attrs) const {
+    Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
+  }
 };
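+
+// A usage sketch (not part of the interface): combining IRAttribute with a
+// wrapped boolean state, the pattern used by the concrete attributes
+// declared later in this header; `AANoUnwindLike` is hypothetical:
+//
+//   struct AANoUnwindLike
+//       : public IRAttribute<Attribute::NoUnwind,
+//                            StateWrapper<BooleanState, AbstractAttribute>> {
+//     AANoUnwindLike(const IRPosition &IRP, Attributor &A)
+//         : IRAttribute(IRP) {}
+//     ...
+//   };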
 
 /// Base struct for all "concrete attribute" deductions.
@@ -493,34 +2262,22 @@
 ///       both directions will be added in the future.
 /// NOTE: The mechanics of adding a new "concrete" abstract attribute are
 ///       described in the file comment.
-struct AbstractAttribute {
+struct AbstractAttribute : public IRPosition, public AADepGraphNode {
+  using StateType = AbstractState;
 
-  /// The positions attributes can be manifested in.
-  enum ManifestPosition {
-    MP_ARGUMENT,           ///< An attribute for a function argument.
-    MP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument.
-    MP_FUNCTION,           ///< An attribute for a function as a whole.
-    MP_RETURNED,           ///< An attribute for the function return value.
-  };
-
-  /// An abstract attribute associated with \p AssociatedVal and anchored at
-  /// \p AnchoredVal.
-  ///
-  /// \param AssociatedVal The value this abstract attribute is associated with.
-  /// \param AnchoredVal The value this abstract attributes is anchored at.
-  /// \param InfoCache Cached information accessible to the abstract attribute.
-  AbstractAttribute(Value *AssociatedVal, Value &AnchoredVal,
-                    InformationCache &InfoCache)
-      : AssociatedVal(AssociatedVal), AnchoredVal(AnchoredVal),
-        InfoCache(InfoCache) {}
-
-  /// An abstract attribute associated with and anchored at \p V.
-  AbstractAttribute(Value &V, InformationCache &InfoCache)
-      : AbstractAttribute(&V, V, InfoCache) {}
+  AbstractAttribute(const IRPosition &IRP) : IRPosition(IRP) {}
 
   /// Virtual destructor.
   virtual ~AbstractAttribute() {}
 
+  /// This function is used to identify if a \p DGN is of type
+  /// AbstractAttribute so that the dyn_cast and cast can use such information
+  /// to cast an AADepGraphNode to an AbstractAttribute.
+  ///
+  /// We eagerly return true here because all AADepGraphNodes except for the
+  /// synthetic root node are of type AbstractAttribute.
+  static bool classof(const AADepGraphNode *DGN) { return true; }
+
   /// Initialize the state with the information in the Attributor \p A.
   ///
   /// This function is called by the Attributor once all abstract attributes
@@ -532,55 +2289,27 @@
   virtual void initialize(Attributor &A) {}
 
   /// Return the internal abstract state for inspection.
-  virtual const AbstractState &getState() const = 0;
+  virtual StateType &getState() = 0;
+  virtual const StateType &getState() const = 0;
 
-  /// Return the value this abstract attribute is anchored with.
-  ///
-  /// The anchored value might not be the associated value if the latter is not
-  /// sufficient to determine where arguments will be manifested. This is mostly
-  /// the case for call site arguments as the value is not sufficient to
-  /// pinpoint them. Instead, we can use the call site as an anchor.
-  ///
-  ///{
-  Value &getAnchoredValue() { return AnchoredVal; }
-  const Value &getAnchoredValue() const { return AnchoredVal; }
-  ///}
-
-  /// Return the llvm::Function surrounding the anchored value.
-  ///
-  ///{
-  Function &getAnchorScope();
-  const Function &getAnchorScope() const;
-  ///}
-
-  /// Return the value this abstract attribute is associated with.
-  ///
-  /// The abstract state usually represents this value.
-  ///
-  ///{
-  virtual Value *getAssociatedValue() { return AssociatedVal; }
-  virtual const Value *getAssociatedValue() const { return AssociatedVal; }
-  ///}
-
-  /// Return the position this abstract state is manifested in.
-  virtual ManifestPosition getManifestPosition() const = 0;
-
-  /// Return the kind that identifies the abstract attribute implementation.
-  virtual Attribute::AttrKind getAttrKind() const = 0;
-
-  /// Return the deduced attributes in \p Attrs.
-  virtual void getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const {
-    LLVMContext &Ctx = AnchoredVal.getContext();
-    Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
-  }
+  /// Return an IR position, see struct IRPosition.
+  const IRPosition &getIRPosition() const { return *this; }
+  IRPosition &getIRPosition() { return *this; }
 
   /// Helper functions, for debug purposes only.
   ///{
-  virtual void print(raw_ostream &OS) const;
+  void print(raw_ostream &OS) const override;
+  virtual void printWithDeps(raw_ostream &OS) const;
   void dump() const { print(dbgs()); }
 
   /// This function should return the "summarized" assumed state as string.
   virtual const std::string getAsStr() const = 0;
+
+  /// This function should return the name of the AbstractAttribute
+  virtual const std::string getName() const = 0;
+
+  /// This function should return the address of the ID of the AbstractAttribute
+  virtual const char *getIdAddr() const = 0;
   ///}
 
   /// Allow the Attributor access to the protected methods.
@@ -599,10 +2328,16 @@
   /// represented by the abstract attribute in the LLVM-IR.
   ///
   /// \Return CHANGED if the IR was altered, otherwise UNCHANGED.
-  virtual ChangeStatus manifest(Attributor &A);
+  virtual ChangeStatus manifest(Attributor &A) {
+    return ChangeStatus::UNCHANGED;
+  }
 
-  /// Return the internal abstract state for careful modification.
-  virtual AbstractState &getState() = 0;
+  /// Hook to enable custom statistic tracking, called after manifest that
+  /// resulted in a change if statistics are enabled.
+  ///
+  /// We require subclasses to provide an implementation so we remember to
+  /// add statistics for them.
+  virtual void trackStatistics() const = 0;
 
   /// The actual update/transfer function which has to be implemented by the
   /// derived classes.
@@ -612,15 +2347,6 @@
   ///
   /// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
   virtual ChangeStatus updateImpl(Attributor &A) = 0;
-
-  /// The value this abstract attribute is associated with.
-  Value *AssociatedVal;
-
-  /// The value this abstract attribute is anchored at.
-  Value &AnchoredVal;
-
-  /// The information cache accessible to this abstract attribute.
-  InformationCache &InfoCache;
 };
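
The classof hook above is what ties AbstractAttribute into LLVM-style RTTI: a generic dependency-graph node can be tested and downcast with dyn_cast. A minimal sketch, assuming a collection of AADepGraphNode pointers is available from a populated dependency graph (the helper and its parameter are hypothetical):

    // Print every graph node that is in fact an AbstractAttribute.
    void printAANodes(ArrayRef<AADepGraphNode *> Nodes) {
      for (AADepGraphNode *DGN : Nodes)
        if (auto *AA = dyn_cast<AbstractAttribute>(DGN)) // uses classof above
          errs() << AA->getName() << " @ " << AA->getIRPosition() << "\n";
    }
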
 
 /// Forward declarations of output streams for debug purposes.
@@ -628,56 +2354,1442 @@
 ///{
 raw_ostream &operator<<(raw_ostream &OS, const AbstractAttribute &AA);
 raw_ostream &operator<<(raw_ostream &OS, ChangeStatus S);
-raw_ostream &operator<<(raw_ostream &OS, AbstractAttribute::ManifestPosition);
+raw_ostream &operator<<(raw_ostream &OS, IRPosition::Kind);
+raw_ostream &operator<<(raw_ostream &OS, const IRPosition &);
 raw_ostream &operator<<(raw_ostream &OS, const AbstractState &State);
+template <typename base_ty, base_ty BestState, base_ty WorstState>
+raw_ostream &
+operator<<(raw_ostream &OS,
+           const IntegerStateBase<base_ty, BestState, WorstState> &S) {
+  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
+            << static_cast<const AbstractState &>(S);
+}
+raw_ostream &operator<<(raw_ostream &OS, const IntegerRangeState &State);
 ///}
 
 struct AttributorPass : public PassInfoMixin<AttributorPass> {
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 };
+struct AttributorCGSCCPass : public PassInfoMixin<AttributorCGSCCPass> {
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
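
AttributorPass and AttributorCGSCCPass follow the usual new-pass-manager conventions. A minimal scheduling sketch for the module-level pass; the module `M` and an initialized ModuleAnalysisManager `MAM` are assumed to exist:

    ModulePassManager MPM;
    MPM.addPass(AttributorPass()); // whole-module attribute deduction
    MPM.run(M, MAM);
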
 
 Pass *createAttributorLegacyPass();
+Pass *createAttributorCGSCCLegacyPass();
 
 /// ----------------------------------------------------------------------------
 ///                       Abstract Attribute Classes
 /// ----------------------------------------------------------------------------
 
 /// An abstract attribute for the returned values of a function.
-struct AAReturnedValues : public AbstractAttribute {
-  /// See AbstractAttribute::AbstractAttribute(...).
-  AAReturnedValues(Function &F, InformationCache &InfoCache)
-      : AbstractAttribute(F, InfoCache) {}
+struct AAReturnedValues
+    : public IRAttribute<Attribute::Returned, AbstractAttribute> {
+  AAReturnedValues(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return an assumed unique return value if a single candidate is found. If
+  /// there cannot be one, return nullptr. If it is not clear yet, return
+  /// Optional::NoneType.
+  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
 
   /// Check \p Pred on all returned values.
   ///
   /// This method will evaluate \p Pred on returned values and return
   /// true if (1) all returned values are known, and (2) \p Pred returned true
   /// for all returned values.
-  virtual bool
-  checkForallReturnedValues(std::function<bool(Value &)> &Pred) const = 0;
+  ///
+  /// Note: Unlike the Attributor::checkForAllReturnedValuesAndReturnInsts
+  /// method, this one will not filter dead return instructions.
+  virtual bool checkForAllReturnedValuesAndReturnInsts(
+      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
+      const = 0;
 
-  /// See AbstractAttribute::getAttrKind()
-  virtual Attribute::AttrKind getAttrKind() const override { return ID; }
+  using iterator =
+      MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::iterator;
+  using const_iterator =
+      MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::const_iterator;
+  virtual llvm::iterator_range<iterator> returned_values() = 0;
+  virtual llvm::iterator_range<const_iterator> returned_values() const = 0;
 
-  /// The identifier used by the Attributor for this class of attributes.
-  static constexpr Attribute::AttrKind ID = Attribute::Returned;
+  virtual size_t getNumReturnValues() const = 0;
+  virtual const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const = 0;
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAReturnedValues &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAReturnedValues"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAReturnedValues
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
 };
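
The callback-based interface above is the intended way to inspect deduced return values. A minimal sketch, assuming `RVAA` is a valid AAReturnedValues reference obtained from an Attributor:

    // True only if all returned values are known and each one is a Constant.
    bool AllConstant = RVAA.checkForAllReturnedValuesAndReturnInsts(
        [](Value &RV, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          return isa<Constant>(RV);
        });
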
 
-struct AANoUnwind : public AbstractAttribute {
-    /// An abstract interface for all nosync attributes.
-    AANoUnwind(Value &V, InformationCache &InfoCache)
-        : AbstractAttribute(V, InfoCache) {}
+struct AANoUnwind
+    : public IRAttribute<Attribute::NoUnwind,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoUnwind(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
 
-    /// See AbstractAttribute::getAttrKind()/
-    virtual Attribute::AttrKind getAttrKind() const override { return ID; }
+  /// Returns true if nounwind is assumed.
+  bool isAssumedNoUnwind() const { return getAssumed(); }
 
-    static constexpr Attribute::AttrKind ID = Attribute::NoUnwind;
+  /// Returns true if nounwind is known.
+  bool isKnownNoUnwind() const { return getKnown(); }
 
-    /// Returns true if nounwind is assumed.
-    virtual bool isAssumedNoUnwind() const = 0;
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);
 
-    /// Returns true if nounwind is known.
-    virtual bool isKnownNoUnwind() const = 0;
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoUnwind"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoUnwind
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
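
AANoUnwind and the boolean attributes that follow are all queried through the same pattern from inside another attribute's updateImpl. A hedged sketch; the exact getAAFor parameter list and the DepClassTy spelling are assumptions that may vary between revisions:

    const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
        *this, IRPosition::function(F), DepClassTy::REQUIRED);
    if (!NoUnwindAA.isAssumedNoUnwind())
      return indicatePessimisticFixpoint(); // cannot keep the assumption
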
+
+struct AANoSync
+    : public IRAttribute<Attribute::NoSync,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoSync(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Returns true if "nosync" is assumed.
+  bool isAssumedNoSync() const { return getAssumed(); }
+
+  /// Returns true if "nosync" is known.
+  bool isKnownNoSync() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoSync"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoSync
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for all nonnull attributes.
+struct AANonNull
+    : public IRAttribute<Attribute::NonNull,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANonNull(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if we assume that the underlying value is nonnull.
+  bool isAssumedNonNull() const { return getAssumed(); }
+
+  /// Return true if we know that the underlying value is nonnull.
+  bool isKnownNonNull() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANonNull"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANonNull
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract attribute for norecurse.
+struct AANoRecurse
+    : public IRAttribute<Attribute::NoRecurse,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoRecurse(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if "norecurse" is assumed.
+  bool isAssumedNoRecurse() const { return getAssumed(); }
+
+  /// Return true if "norecurse" is known.
+  bool isKnownNoRecurse() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoRecurse"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoRecurse
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract attribute for willreturn.
+struct AAWillReturn
+    : public IRAttribute<Attribute::WillReturn,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AAWillReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if "willreturn" is assumed.
+  bool isAssumedWillReturn() const { return getAssumed(); }
+
+  /// Return true if "willreturn" is known.
+  bool isKnownWillReturn() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAWillReturn"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AAWillReturn
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract attribute for undefined behavior.
+struct AAUndefinedBehavior
+    : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAUndefinedBehavior(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Return true if "undefined behavior" is assumed.
+  bool isAssumedToCauseUB() const { return getAssumed(); }
+
+  /// Return true if "undefined behavior" is assumed for a specific instruction.
+  virtual bool isAssumedToCauseUB(Instruction *I) const = 0;
+
+  /// Return true if "undefined behavior" is known.
+  bool isKnownToCauseUB() const { return getKnown(); }
+
+  /// Return true if "undefined behavior" is known for a specific instruction.
+  virtual bool isKnownToCauseUB(Instruction *I) const = 0;
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAUndefinedBehavior &createForPosition(const IRPosition &IRP,
+                                                Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAUndefinedBehavior"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAUndefinedBehavior
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface to determine reachability of point A to B.
+struct AAReachability : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Returns true if the 'From' instruction is assumed to reach the 'To'
+  /// instruction.
+  /// Users should provide two positions they are interested in, and the class
+  /// determines (and caches) reachability.
+  bool isAssumedReachable(Attributor &A, const Instruction &From,
+                          const Instruction &To) const {
+    return A.getInfoCache().getPotentiallyReachable(From, To);
+  }
+
+  /// Returns true if the 'From' instruction is known to reach the 'To'
+  /// instruction.
+  /// Users should provide two positions they are interested in, and the class
+  /// determines (and caches) reachability.
+  bool isKnownReachable(Attributor &A, const Instruction &From,
+                        const Instruction &To) const {
+    return A.getInfoCache().getPotentiallyReachable(From, To);
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAReachability &createForPosition(const IRPosition &IRP,
+                                           Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAReachability"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAReachability
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for all noalias attributes.
+struct AANoAlias
+    : public IRAttribute<Attribute::NoAlias,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoAlias(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if we assume that the underlying value is noalias.
+  bool isAssumedNoAlias() const { return getAssumed(); }
+
+  /// Return true if we know that the underlying value is noalias.
+  bool isKnownNoAlias() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoAlias"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoAlias
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An AbstractAttribute for nofree.
+struct AANoFree
+    : public IRAttribute<Attribute::NoFree,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoFree(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if "nofree" is assumed.
+  bool isAssumedNoFree() const { return getAssumed(); }
+
+  /// Return true if "nofree" is known.
+  bool isKnownNoFree() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoFree"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoFree
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An AbstractAttribute for noreturn.
+struct AANoReturn
+    : public IRAttribute<Attribute::NoReturn,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if the underlying object is assumed to never return.
+  bool isAssumedNoReturn() const { return getAssumed(); }
+
+  /// Return true if the underlying object is known to never return.
+  bool isKnownNoReturn() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoReturn"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoReturn
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for liveness abstract attribute.
+struct AAIsDead : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAIsDead(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+protected:
+  /// The query functions are protected such that other attributes need to go
+  /// through the Attributor interfaces: `Attributor::isAssumedDead(...)`
+
+  /// Returns true if the underlying value is assumed dead.
+  virtual bool isAssumedDead() const = 0;
+
+  /// Returns true if the underlying value is known dead.
+  virtual bool isKnownDead() const = 0;
+
+  /// Returns true if \p BB is assumed dead.
+  virtual bool isAssumedDead(const BasicBlock *BB) const = 0;
+
+  /// Returns true if \p BB is known dead.
+  virtual bool isKnownDead(const BasicBlock *BB) const = 0;
+
+  /// Returns true if \p I is assumed dead.
+  virtual bool isAssumedDead(const Instruction *I) const = 0;
+
+  /// Returns true if \p I is known dead.
+  virtual bool isKnownDead(const Instruction *I) const = 0;
+
+  /// This method is used to check if at least one instruction in a collection
+  /// of instructions is live.
+  template <typename T> bool isLiveInstSet(T begin, T end) const {
+    for (const auto &I : llvm::make_range(begin, end)) {
+      assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
+             "Instruction must be in the same anchor scope function.");
+
+      if (!isAssumedDead(I))
+        return true;
+    }
+
+    return false;
+  }
+
+public:
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// Determine if \p F might catch asynchronous exceptions.
+  static bool mayCatchAsynchronousExceptions(const Function &F) {
+    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
+  }
+
+  /// Return true if the edge from \p From BB to \p To BB is assumed dead.
+  /// This is specifically useful in AAReachability.
+  virtual bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const {
+    return false;
+  }
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAIsDead"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AAIsDead
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+
+  friend struct Attributor;
+};
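
Because the query functions are protected, other attributes must route liveness checks through the Attributor, as the comment above says. A hedged sketch; the isAssumedDead overload shown here (instruction, querying AA, optional liveness AA, out-flag) is an assumption about the surrounding Attributor API:

    bool UsedAssumedInformation = false;
    if (A.isAssumedDead(I, /*QueryingAA=*/this, /*LivenessAA=*/nullptr,
                        UsedAssumedInformation))
      continue; // skip the dead instruction I during the scan
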
+
+/// State for dereferenceable attribute
+struct DerefState : AbstractState {
+
+  static DerefState getBestState() { return DerefState(); }
+  static DerefState getBestState(const DerefState &) { return getBestState(); }
+
+  /// Return the worst possible representable state.
+  static DerefState getWorstState() {
+    DerefState DS;
+    DS.indicatePessimisticFixpoint();
+    return DS;
+  }
+  static DerefState getWorstState(const DerefState &) {
+    return getWorstState();
+  }
+
+  /// State representing the dereferenceable bytes.
+  IncIntegerState<> DerefBytesState;
+
+  /// Map representing accessed memory offsets and sizes.
+  /// A key is the offset and a value is the size.
+  /// If there is a load/store instruction like
+  ///   p[offset] = v;
+  /// (offset, sizeof(v)) will be inserted into this map.
+  /// std::map is used because we want to iterate keys in ascending order.
+  std::map<int64_t, uint64_t> AccessedBytesMap;
+
+  /// Helper function to calculate dereferenceable bytes from current known
+  /// bytes and accessed bytes.
+  ///
+  /// ```
+  /// int f(int *A) {
+  ///    *A = 0;
+  ///    *(A+2) = 2;
+  ///    *(A+1) = 1;
+  ///    *(A+10) = 10;
+  /// }
+  /// ```
+  /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`.
+  /// AccessedBytesMap is a std::map so it is iterated in ascending order on
+  /// the key (Offset). So KnownBytes will be updated like this:
+  ///
+  /// | Access  | KnownBytes
+  /// | (0, 4)  | 0 -> 4
+  /// | (4, 4)  | 4 -> 8
+  /// | (8, 4)  | 8 -> 12
+  /// | (40, 4) | 12 (break)
+  void computeKnownDerefBytesFromAccessedMap() {
+    int64_t KnownBytes = DerefBytesState.getKnown();
+    for (auto &Access : AccessedBytesMap) {
+      if (KnownBytes < Access.first)
+        break;
+      KnownBytes = std::max(KnownBytes, Access.first + (int64_t)Access.second);
+    }
+
+    DerefBytesState.takeKnownMaximum(KnownBytes);
+  }
+
+  /// State representing whether the value is globally dereferenceable.
+  BooleanState GlobalState;
+
+  /// See AbstractState::isValidState()
+  bool isValidState() const override { return DerefBytesState.isValidState(); }
+
+  /// See AbstractState::isAtFixpoint()
+  bool isAtFixpoint() const override {
+    return !isValidState() ||
+           (DerefBytesState.isAtFixpoint() && GlobalState.isAtFixpoint());
+  }
+
+  /// See AbstractState::indicateOptimisticFixpoint(...)
+  ChangeStatus indicateOptimisticFixpoint() override {
+    DerefBytesState.indicateOptimisticFixpoint();
+    GlobalState.indicateOptimisticFixpoint();
+    return ChangeStatus::UNCHANGED;
+  }
+
+  /// See AbstractState::indicatePessimisticFixpoint(...)
+  ChangeStatus indicatePessimisticFixpoint() override {
+    DerefBytesState.indicatePessimisticFixpoint();
+    GlobalState.indicatePessimisticFixpoint();
+    return ChangeStatus::CHANGED;
+  }
+
+  /// Update known dereferenceable bytes.
+  void takeKnownDerefBytesMaximum(uint64_t Bytes) {
+    DerefBytesState.takeKnownMaximum(Bytes);
+
+    // Known bytes might increase.
+    computeKnownDerefBytesFromAccessedMap();
+  }
+
+  /// Update assumed dereferenceable bytes.
+  void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
+    DerefBytesState.takeAssumedMinimum(Bytes);
+  }
+
+  /// Add accessed bytes to the map.
+  void addAccessedBytes(int64_t Offset, uint64_t Size) {
+    uint64_t &AccessedBytes = AccessedBytesMap[Offset];
+    AccessedBytes = std::max(AccessedBytes, Size);
+
+    // Known bytes might increase.
+    computeKnownDerefBytesFromAccessedMap();
+  }
+
+  /// Equality for DerefState.
+  bool operator==(const DerefState &R) const {
+    return this->DerefBytesState == R.DerefBytesState &&
+           this->GlobalState == R.GlobalState;
+  }
+
+  /// Inequality for DerefState.
+  bool operator!=(const DerefState &R) const { return !(*this == R); }
+
+  /// See IntegerStateBase::operator^=
+  DerefState operator^=(const DerefState &R) {
+    DerefBytesState ^= R.DerefBytesState;
+    GlobalState ^= R.GlobalState;
+    return *this;
+  }
+
+  /// See IntegerStateBase::operator+=
+  DerefState operator+=(const DerefState &R) {
+    DerefBytesState += R.DerefBytesState;
+    GlobalState += R.GlobalState;
+    return *this;
+  }
+
+  /// See IntegerStateBase::operator&=
+  DerefState operator&=(const DerefState &R) {
+    DerefBytesState &= R.DerefBytesState;
+    GlobalState &= R.GlobalState;
+    return *this;
+  }
+
+  /// See IntegerStateBase::operator|=
+  DerefState operator|=(const DerefState &R) {
+    DerefBytesState |= R.DerefBytesState;
+    GlobalState |= R.GlobalState;
+    return *this;
+  }
+
+protected:
+  const AANonNull *NonNullAA = nullptr;
+};
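
A worked instance of the map-folding logic above, mirroring the `int f(int *A)` example from the comment (byte offsets, sizeof(int) == 4; a minimal sketch using only members defined in this struct):

    DerefState DS;
    DS.addAccessedBytes(0, 4);  // *A
    DS.addAccessedBytes(8, 4);  // *(A+2)
    DS.addAccessedBytes(4, 4);  // *(A+1)
    DS.addAccessedBytes(40, 4); // *(A+10)
    // Contiguous coverage from offset 0 is [0, 12); the access at offset 40
    // lies beyond the known prefix, so the fold breaks there.
    assert(DS.DerefBytesState.getKnown() == 12);
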
+
+/// An abstract interface for all dereferenceable attributes.
+struct AADereferenceable
+    : public IRAttribute<Attribute::Dereferenceable,
+                         StateWrapper<DerefState, AbstractAttribute>> {
+  AADereferenceable(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if we assume that the underlying value is nonnull.
+  bool isAssumedNonNull() const {
+    return NonNullAA && NonNullAA->isAssumedNonNull();
+  }
+
+  /// Return true if we know that the underlying value is nonnull.
+  bool isKnownNonNull() const {
+    return NonNullAA && NonNullAA->isKnownNonNull();
+  }
+
+  /// Return true if we assume that the underlying value is
+  /// dereferenceable(_or_null) globally.
+  bool isAssumedGlobal() const { return GlobalState.getAssumed(); }
+
+  /// Return true if we know that the underlying value is
+  /// dereferenceable(_or_null) globally.
+  bool isKnownGlobal() const { return GlobalState.getKnown(); }
+
+  /// Return assumed dereferenceable bytes.
+  uint32_t getAssumedDereferenceableBytes() const {
+    return DerefBytesState.getAssumed();
+  }
+
+  /// Return known dereferenceable bytes.
+  uint32_t getKnownDereferenceableBytes() const {
+    return DerefBytesState.getKnown();
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AADereferenceable &createForPosition(const IRPosition &IRP,
+                                              Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AADereferenceable"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AADereferenceable
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+using AAAlignmentStateType =
+    IncIntegerState<uint32_t, Value::MaximumAlignment, 0>;
+/// An abstract interface for all align attributes.
+struct AAAlign : public IRAttribute<
+                     Attribute::Alignment,
+                     StateWrapper<AAAlignmentStateType, AbstractAttribute>> {
+  AAAlign(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return assumed alignment.
+  unsigned getAssumedAlign() const { return getAssumed(); }
+
+  /// Return known alignment.
+  unsigned getKnownAlign() const { return getKnown(); }
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAAlign"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AAAlign
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for all nocapture attributes.
+struct AANoCapture
+    : public IRAttribute<
+          Attribute::NoCapture,
+          StateWrapper<BitIntegerState<uint16_t, 7, 0>, AbstractAttribute>> {
+  AANoCapture(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// State encoding bits. A set bit in the state means the property holds.
+  /// NO_CAPTURE is the best possible state, 0 the worst possible state.
+  enum {
+    NOT_CAPTURED_IN_MEM = 1 << 0,
+    NOT_CAPTURED_IN_INT = 1 << 1,
+    NOT_CAPTURED_IN_RET = 1 << 2,
+
+    /// If we do not capture the value in memory or through integers we can only
+    /// communicate it back as a derived pointer.
+    NO_CAPTURE_MAYBE_RETURNED = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT,
+
+    /// If we do not capture the value in memory, through integers, or as a
+    /// derived pointer we know it is not captured.
+    NO_CAPTURE =
+        NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
+  };
+
+  /// Return true if we know that the underlying value is not captured in its
+  /// respective scope.
+  bool isKnownNoCapture() const { return isKnown(NO_CAPTURE); }
+
+  /// Return true if we assume that the underlying value is not captured in its
+  /// respective scope.
+  bool isAssumedNoCapture() const { return isAssumed(NO_CAPTURE); }
+
+  /// Return true if we know that the underlying value is not captured in its
+  /// respective scope but we allow it to escape through a "return".
+  bool isKnownNoCaptureMaybeReturned() const {
+    return isKnown(NO_CAPTURE_MAYBE_RETURNED);
+  }
+
+  /// Return true if we assume that the underlying value is not captured in its
+  /// respective scope but we allow it to escape through a "return".
+  bool isAssumedNoCaptureMaybeReturned() const {
+    return isAssumed(NO_CAPTURE_MAYBE_RETURNED);
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoCapture &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoCapture"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoCapture
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
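
The enum above forms a small bit lattice in which every set bit is a proven "not captured via X" fact; the composite states are plain bit unions. A short sketch:

    unsigned S = AANoCapture::NOT_CAPTURED_IN_MEM |
                 AANoCapture::NOT_CAPTURED_IN_INT;
    assert(S == AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
    // NOT_CAPTURED_IN_RET is missing, so the value may still escape through
    // a return; adding it yields the full NO_CAPTURE state.
    assert((S | AANoCapture::NOT_CAPTURED_IN_RET) == AANoCapture::NO_CAPTURE);
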
+
+/// An abstract interface for value simplify abstract attribute.
+struct AAValueSimplify : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAValueSimplify(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Return an assumed simplified value if a single candidate is found. If
+  /// there cannot be one, return the original value. If it is not clear yet,
+  /// return Optional::NoneType.
+  virtual Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const = 0;
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAValueSimplify &createForPosition(const IRPosition &IRP,
+                                            Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAValueSimplify"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAValueSimplify
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAHeapToStack(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Returns true if HeapToStack conversion is assumed to be possible.
+  bool isAssumedHeapToStack() const { return getAssumed(); }
+
+  /// Returns true if HeapToStack conversion is known to be possible.
+  bool isKnownHeapToStack() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAHeapToStack"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AAHeapToStack
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for privatizability.
+///
+/// A pointer is privatizable if it can be replaced by a new, private one.
+/// Privatizing a pointer reduces its use count and the interaction between
+/// unrelated code parts.
+///
+/// In order for a pointer to be privatizable its value cannot be observed
+/// (=nocapture), it is (for now) not written (=readonly & noalias), we know
+/// what values are necessary to make the private copy look like the original
+/// one, and the values we need can be loaded (=dereferenceable).
+struct AAPrivatizablePtr
+    : public StateWrapper<BooleanState, AbstractAttribute> {
+  using Base = StateWrapper<BooleanState, AbstractAttribute>;
+  AAPrivatizablePtr(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// Returns true if pointer privatization is assumed to be possible.
+  bool isAssumedPrivatizablePtr() const { return getAssumed(); }
+
+  /// Returns true if pointer privatization is known to be possible.
+  bool isKnownPrivatizablePtr() const { return getKnown(); }
+
+  /// Return the type we can choose for a private copy of the underlying
+  /// value. None means it is not clear yet, nullptr means there is none.
+  virtual Optional<Type *> getPrivatizableType() const = 0;
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAPrivatizablePtr &createForPosition(const IRPosition &IRP,
+                                              Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAPrivatizablePtr"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAPrivatizablePtr
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for memory access kind related attributes
+/// (readnone/readonly/writeonly).
+struct AAMemoryBehavior
+    : public IRAttribute<
+          Attribute::ReadNone,
+          StateWrapper<BitIntegerState<uint8_t, 3>, AbstractAttribute>> {
+  AAMemoryBehavior(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// State encoding bits. A set bit in the state means the property holds.
+  /// BEST_STATE is the best possible state, 0 the worst possible state.
+  enum {
+    NO_READS = 1 << 0,
+    NO_WRITES = 1 << 1,
+    NO_ACCESSES = NO_READS | NO_WRITES,
+
+    BEST_STATE = NO_ACCESSES,
+  };
+  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");
+
+  /// Return true if we know that the underlying value is neither read nor
+  /// written in its respective scope.
+  bool isKnownReadNone() const { return isKnown(NO_ACCESSES); }
+
+  /// Return true if we assume that the underlying value is neither read nor
+  /// written in its respective scope.
+
+  /// Return true if we know that the underlying value is not written in its
+  /// respective scope.
+  bool isKnownReadOnly() const { return isKnown(NO_WRITES); }
+
+  /// Return true if we assume that the underlying value is not written in its
+  /// respective scope.
+  bool isAssumedReadOnly() const { return isAssumed(NO_WRITES); }
+
+  /// Return true if we know that the underlying value is not read in its
+  /// respective scope.
+  bool isKnownWriteOnly() const { return isKnown(NO_READS); }
+
+  /// Return true if we assume that the underlying value is not read in its
+  /// respective scope.
+  bool isAssumedWriteOnly() const { return isAssumed(NO_READS); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAMemoryBehavior &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAMemoryBehavior"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAMemoryBehavior
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for all memory location attributes
+/// (readnone/argmemonly/inaccessiblememonly/inaccessibleorargmemonly).
+struct AAMemoryLocation
+    : public IRAttribute<
+          Attribute::ReadNone,
+          StateWrapper<BitIntegerState<uint32_t, 511>, AbstractAttribute>> {
+  using MemoryLocationsKind = StateType::base_t;
+
+  AAMemoryLocation(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Encoding of different locations that could be accessed by a memory
+  /// access.
+  enum {
+    ALL_LOCATIONS = 0,
+    NO_LOCAL_MEM = 1 << 0,
+    NO_CONST_MEM = 1 << 1,
+    NO_GLOBAL_INTERNAL_MEM = 1 << 2,
+    NO_GLOBAL_EXTERNAL_MEM = 1 << 3,
+    NO_GLOBAL_MEM = NO_GLOBAL_INTERNAL_MEM | NO_GLOBAL_EXTERNAL_MEM,
+    NO_ARGUMENT_MEM = 1 << 4,
+    NO_INACCESSIBLE_MEM = 1 << 5,
+    NO_MALLOCED_MEM = 1 << 6,
+    NO_UNKOWN_MEM = 1 << 7,
+    NO_LOCATIONS = NO_LOCAL_MEM | NO_CONST_MEM | NO_GLOBAL_INTERNAL_MEM |
+                   NO_GLOBAL_EXTERNAL_MEM | NO_ARGUMENT_MEM |
+                   NO_INACCESSIBLE_MEM | NO_MALLOCED_MEM | NO_UNKOWN_MEM,
+
+    // Helper bit to track if we gave up or not.
+    VALID_STATE = NO_LOCATIONS + 1,
+
+    BEST_STATE = NO_LOCATIONS | VALID_STATE,
+  };
+  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");
+
+  /// Return true if we know that the associated function has no observable
+  /// accesses.
+  bool isKnownReadNone() const { return isKnown(NO_LOCATIONS); }
+
+  /// Return true if we assume that the associated function has no observable
+  /// accesses.
+  bool isAssumedReadNone() const {
+    return isAssumed(NO_LOCATIONS) | isAssumedStackOnly();
+  }
+
+  /// Return true if we know that the associated function has at most
+  /// local/stack accesses.
+  bool isKnowStackOnly() const {
+    return isKnown(inverseLocation(NO_LOCAL_MEM, true, true));
+  }
+
+  /// Return true if we assume that the associated function has at most
+  /// local/stack accesses.
+  bool isAssumedStackOnly() const {
+    return isAssumed(inverseLocation(NO_LOCAL_MEM, true, true));
+  }
+
+  /// Return true if we know that the underlying value will only access
+  /// inaccessible memory (see Attribute::InaccessibleMemOnly).
+  bool isKnownInaccessibleMemOnly() const {
+    return isKnown(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
+  }
+
+  /// Return true if we assume that the underlying value will only access
+  /// inaccessible memory (see Attribute::InaccessibleMemOnly).
+  bool isAssumedInaccessibleMemOnly() const {
+    return isAssumed(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
+  }
+
+  /// Return true if we know that the underlying value will only access
+  /// argument pointees (see Attribute::ArgMemOnly).
+  bool isKnownArgMemOnly() const {
+    return isKnown(inverseLocation(NO_ARGUMENT_MEM, true, true));
+  }
+
+  /// Return true if we assume that the underlying value will only access
+  /// argument pointees (see Attribute::ArgMemOnly).
+  bool isAssumedArgMemOnly() const {
+    return isAssumed(inverseLocation(NO_ARGUMENT_MEM, true, true));
+  }
+
+  /// Return true if we know that the underlying value will only access
+  /// inaccessible memory or argument pointees (see
+  /// Attribute::InaccessibleOrArgMemOnly).
+  bool isKnownInaccessibleOrArgMemOnly() const {
+    return isKnown(
+        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
+  }
+
+  /// Return true if we assume that the underlying value will only access
+  /// inaccessible memory or argument pointees (see
+  /// Attribute::InaccessibleOrArgMemOnly).
+  bool isAssumedInaccessibleOrArgMemOnly() const {
+    return isAssumed(
+        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
+  }
+
+  /// Return true if the underlying value may access memory through argument
+  /// pointers of the associated function, if any.
+  bool mayAccessArgMem() const { return !isAssumed(NO_ARGUMENT_MEM); }
+
+  /// Return true if only the memory locations specified by \p MLK are assumed
+  /// to be accessed by the associated function.
+  bool isAssumedSpecifiedMemOnly(MemoryLocationsKind MLK) const {
+    return isAssumed(MLK);
+  }
+
+  /// Return the locations that are assumed to be not accessed by the associated
+  /// function, if any.
+  MemoryLocationsKind getAssumedNotAccessedLocation() const {
+    return getAssumed();
+  }
+
+  /// Return the inverse of location \p Loc, thus for NO_XXX the return
+  /// describes ONLY_XXX. The flags \p AndLocalMem and \p AndConstMem determine
+  /// if local (=stack) and constant memory are allowed as well. Most of the
+  /// time we do want them to be included, e.g., argmemonly allows accesses via
+  /// argument pointers or local or constant memory accesses.
+  static MemoryLocationsKind
+  inverseLocation(MemoryLocationsKind Loc, bool AndLocalMem, bool AndConstMem) {
+    return NO_LOCATIONS & ~(Loc | (AndLocalMem ? NO_LOCAL_MEM : 0) |
+                            (AndConstMem ? NO_CONST_MEM : 0));
+  }
+
+  /// Return the locations encoded by \p MLK as a readable string.
+  static std::string getMemoryLocationsAsStr(MemoryLocationsKind MLK);
+
+  /// Simple enum to distinguish read/write/read-write accesses.
+  enum AccessKind {
+    NONE = 0,
+    READ = 1 << 0,
+    WRITE = 1 << 1,
+    READ_WRITE = READ | WRITE,
+  };
+
+  /// Check \p Pred on all accesses to the memory kinds specified by \p MLK.
+  ///
+  /// This method will evaluate \p Pred on all accesses (access instruction +
+  /// underlying accessed memory pointer) and it will return true if \p Pred
+  /// holds every time.
+  virtual bool checkForAllAccessesToMemoryKind(
+      function_ref<bool(const Instruction *, const Value *, AccessKind,
+                        MemoryLocationsKind)>
+          Pred,
+      MemoryLocationsKind MLK) const = 0;
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAMemoryLocation &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
+  /// See AbstractAttribute::getAsStr().
+  const std::string getAsStr() const override {
+    return getMemoryLocationsAsStr(getAssumedNotAccessedLocation());
+  }
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAMemoryLocation"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAMemoryLocation
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
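
inverseLocation is easiest to read through a worked case: "argmemonly" in this encoding means every NO_* bit holds except the ones for argument, local (stack), and constant memory, which the function may still touch.

    AAMemoryLocation::MemoryLocationsKind ArgMemOnly =
        AAMemoryLocation::inverseLocation(AAMemoryLocation::NO_ARGUMENT_MEM,
                                          /*AndLocalMem=*/true,
                                          /*AndConstMem=*/true);
    // Equivalent to NO_GLOBAL_MEM | NO_INACCESSIBLE_MEM | NO_MALLOCED_MEM |
    // NO_UNKOWN_MEM (identifier spelled as in the enum above).
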
+
+/// An abstract interface for range value analysis.
+struct AAValueConstantRange
+    : public StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t> {
+  using Base = StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t>;
+  AAValueConstantRange(const IRPosition &IRP, Attributor &A)
+      : Base(IRP, IRP.getAssociatedType()->getIntegerBitWidth()) {}
+
+  /// See AbstractAttribute::getState(...).
+  IntegerRangeState &getState() override { return *this; }
+  const IntegerRangeState &getState() const override { return *this; }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAValueConstantRange &createForPosition(const IRPosition &IRP,
+                                                 Attributor &A);
+
+  /// Return an assumed range for the associated value at a program point
+  /// \p CtxI. If \p CtxI is nullptr, simply return an assumed range.
+  virtual ConstantRange
+  getAssumedConstantRange(Attributor &A,
+                          const Instruction *CtxI = nullptr) const = 0;
+
+  /// Return a known range for the associated value at a program point
+  /// \p CtxI. If \p CtxI is nullptr, simply return a known range.
+  virtual ConstantRange
+  getKnownConstantRange(Attributor &A,
+                        const Instruction *CtxI = nullptr) const = 0;
+
+  /// Return an assumed constant for the associated value at a program point
+  /// \p CtxI.
+  Optional<ConstantInt *>
+  getAssumedConstantInt(Attributor &A,
+                        const Instruction *CtxI = nullptr) const {
+    ConstantRange RangeV = getAssumedConstantRange(A, CtxI);
+    if (auto *C = RangeV.getSingleElement())
+      return cast<ConstantInt>(
+          ConstantInt::get(getAssociatedValue().getType(), *C));
+    if (RangeV.isEmptySet())
+      return llvm::None;
+    return nullptr;
+  }
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAValueConstantRange"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAValueConstantRange
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
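
getAssumedConstantInt above folds the range query into a three-way answer. A minimal sketch, assuming `VCRAA` is a valid AAValueConstantRange reference and `A` an Attributor:

    Optional<ConstantInt *> C = VCRAA.getAssumedConstantInt(A);
    if (!C.hasValue())
      ; // empty range: the position is effectively dead
    else if (C.getValue())
      ; // range collapsed to one constant; uses can be folded
    else
      ; // nullptr payload: more than one value is still possible
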
+
+/// A class for a set state.
+/// The assumed boolean state indicates whether the corresponding set is the
+/// full set or not. If the assumed state is false, this is the worst state.
+/// The worst state (invalid state) of a set of potential values is when the
+/// set contains every possible value (i.e. we cannot in any way limit the
+/// value that the target position can take). That never happens naturally,
+/// we only force it. As for the conditions under which we force it, see
+/// AAPotentialValues.
+template <typename MemberTy, typename KeyInfo = DenseMapInfo<MemberTy>>
+struct PotentialValuesState : AbstractState {
+  using SetTy = DenseSet<MemberTy, KeyInfo>;
+
+  PotentialValuesState() : IsValidState(true), UndefIsContained(false) {}
+
+  PotentialValuesState(bool IsValid)
+      : IsValidState(IsValid), UndefIsContained(false) {}
+
+  /// See AbstractState::isValidState(...)
+  bool isValidState() const override { return IsValidState.isValidState(); }
+
+  /// See AbstractState::isAtFixpoint(...)
+  bool isAtFixpoint() const override { return IsValidState.isAtFixpoint(); }
+
+  /// See AbstractState::indicatePessimisticFixpoint(...)
+  ChangeStatus indicatePessimisticFixpoint() override {
+    return IsValidState.indicatePessimisticFixpoint();
+  }
+
+  /// See AbstractState::indicateOptimisticFixpoint(...)
+  ChangeStatus indicateOptimisticFixpoint() override {
+    return IsValidState.indicateOptimisticFixpoint();
+  }
+
+  /// Return the assumed state
+  PotentialValuesState &getAssumed() { return *this; }
+  const PotentialValuesState &getAssumed() const { return *this; }
+
+  /// Return this set. We should check whether this set is valid or not by
+  /// isValidState() before calling this function.
+  const SetTy &getAssumedSet() const {
+    assert(isValidState() && "This set shoud not be used when it is invalid!");
+    return Set;
+  }
+
+  /// Returns whether this state contains an undef value or not.
+  bool undefIsContained() const {
+    assert(isValidState() && "This flag shoud not be used when it is invalid!");
+    return UndefIsContained;
+  }
+
+  bool operator==(const PotentialValuesState &RHS) const {
+    if (isValidState() != RHS.isValidState())
+      return false;
+    if (!isValidState() && !RHS.isValidState())
+      return true;
+    if (undefIsContained() != RHS.undefIsContained())
+      return false;
+    return Set == RHS.getAssumedSet();
+  }
+
+  /// Maximum number of potential values to be tracked.
+  /// This is set by the -attributor-max-potential-values command line option.
+  static unsigned MaxPotentialValues;
+
+  /// Return empty set as the best state of potential values.
+  static PotentialValuesState getBestState() {
+    return PotentialValuesState(true);
+  }
+
+  static PotentialValuesState getBestState(PotentialValuesState &PVS) {
+    return getBestState();
+  }
+
+  /// Return full set as the worst state of potential values.
+  static PotentialValuesState getWorstState() {
+    return PotentialValuesState(false);
+  }
+
+  /// Union assumed set with the passed value.
+  void unionAssumed(const MemberTy &C) { insert(C); }
+
+  /// Union assumed set with assumed set of the passed state \p PVS.
+  void unionAssumed(const PotentialValuesState &PVS) { unionWith(PVS); }
+
+  /// Union assumed set with an undef value.
+  void unionAssumedWithUndef() { unionWithUndef(); }
+
+  /// "Clamp" this state with \p PVS.
+  PotentialValuesState operator^=(const PotentialValuesState &PVS) {
+    IsValidState ^= PVS.IsValidState;
+    unionAssumed(PVS);
+    return *this;
+  }
+
+  PotentialValuesState operator&=(const PotentialValuesState &PVS) {
+    IsValidState &= PVS.IsValidState;
+    unionAssumed(PVS);
+    return *this;
+  }
+
+private:
+  /// Check the size of this set, and invalidate it once the size reaches the
+  /// \p MaxPotentialValues threshold.
+  void checkAndInvalidate() {
+    if (Set.size() >= MaxPotentialValues)
+      indicatePessimisticFixpoint();
+  }
+
+  /// If this state contains both undef and non-undef values, we can drop the
+  /// undef value (it is subsumed by the concrete values).
+  void reduceUndefValue() { UndefIsContained = UndefIsContained & Set.empty(); }
+
+  /// Insert an element into this set.
+  void insert(const MemberTy &C) {
+    if (!isValidState())
+      return;
+    Set.insert(C);
+    checkAndInvalidate();
+  }
+
+  /// Take union with R.
+  void unionWith(const PotentialValuesState &R) {
+    /// If this is a full set, do nothing.
+    if (!isValidState())
+      return;
+    /// If R is a full set, change this set to a full set.
+    if (!R.isValidState()) {
+      indicatePessimisticFixpoint();
+      return;
+    }
+    for (const MemberTy &C : R.Set)
+      Set.insert(C);
+    UndefIsContained |= R.undefIsContained();
+    reduceUndefValue();
+    checkAndInvalidate();
+  }
+
+  /// Take union with an undef value.
+  void unionWithUndef() {
+    UndefIsContained = true;
+    reduceUndefValue();
+  }
+
+  /// Take intersection with R.
+  void intersectWith(const PotentialValuesState &R) {
+    /// If R is a full set, do nothing.
+    if (!R.isValidState())
+      return;
+    /// If this is a full set, change this to R.
+    if (!isValidState()) {
+      *this = R;
+      return;
+    }
+    SetTy IntersectSet;
+    for (const MemberTy &C : Set) {
+      if (R.Set.count(C))
+        IntersectSet.insert(C);
+    }
+    Set = IntersectSet;
+    UndefIsContained &= R.undefIsContained();
+    reduceUndefValue();
+  }
+
+  /// A helper state which indicates whether this state is valid or not.
+  BooleanState IsValidState;
+
+  /// Container for potential values
+  SetTy Set;
+
+  /// Flag for undef value
+  bool UndefIsContained;
+};
+
+using PotentialConstantIntValuesState = PotentialValuesState<APInt>;
+
+raw_ostream &operator<<(raw_ostream &OS,
+                        const PotentialConstantIntValuesState &R);
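
A minimal sketch of the set-state semantics defined above: monotonic growth via unionAssumed, and forced invalidation once a full-set (worst) state is merged in:

    PotentialConstantIntValuesState S =
        PotentialConstantIntValuesState::getBestState();
    S.unionAssumed(APInt(32, 1));
    S.unionAssumed(APInt(32, 2));
    assert(S.isValidState() && S.getAssumedSet().size() == 2);
    S.unionAssumed(PotentialConstantIntValuesState::getWorstState());
    assert(!S.isValidState()); // full set: no usable information remains
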
+
+/// An abstract interface for potential values analysis.
+///
+/// This AA collects potential values for each IR position.
+/// An assumed set of potential values is initialized with the empty set (the
+/// best state) and it will grow monotonically as we find more potential values
+/// for this position.
+/// The set might be forced to the worst state, that is, to contain every
+/// possible value for this position, in two cases:
+///   1. We surpassed the \p MaxPotentialValues threshold. This includes the
+///      case that this position is affected (e.g. because of an operation) by a
+///      Value that is in the worst state.
+///   2. We tried to initialize on a Value that we cannot handle (e.g. an
+///      operator we do not currently handle).
+///
+/// TODO: Support values other than constant integers.
+struct AAPotentialValues
+    : public StateWrapper<PotentialConstantIntValuesState, AbstractAttribute> {
+  using Base = StateWrapper<PotentialConstantIntValuesState, AbstractAttribute>;
+  AAPotentialValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+  /// See AbstractAttribute::getState(...).
+  PotentialConstantIntValuesState &getState() override { return *this; }
+  const PotentialConstantIntValuesState &getState() const override {
+    return *this;
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAPotentialValues &createForPosition(const IRPosition &IRP,
+                                              Attributor &A);
+
+  /// Return an assumed constant for the associated value.
+  Optional<ConstantInt *>
+  getAssumedConstantInt(Attributor &A,
+                        const Instruction *CtxI = nullptr) const {
+    if (!isValidState())
+      return nullptr;
+    if (getAssumedSet().size() == 1)
+      return cast<ConstantInt>(ConstantInt::get(getAssociatedValue().getType(),
+                                                *(getAssumedSet().begin())));
+    if (getAssumedSet().size() == 0) {
+      if (undefIsContained())
+        return cast<ConstantInt>(
+            ConstantInt::get(getAssociatedValue().getType(), 0));
+      return llvm::None;
+    }
+
+    return nullptr;
+  }
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAPotentialValues"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAPotentialValues
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// An abstract interface for all noundef attributes.
+struct AANoUndef
+    : public IRAttribute<Attribute::NoUndef,
+                         StateWrapper<BooleanState, AbstractAttribute>> {
+  AANoUndef(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+  /// Return true if we assume that the underlying value is noundef.
+  bool isAssumedNoUndef() const { return getAssumed(); }
+
+  /// Return true if we know that the underlying value is noundef.
+  bool isKnownNoUndef() const { return getKnown(); }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoUndef &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AANoUndef"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is AANoUndef
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
+/// Run options, used by the pass manager.
+enum AttributorRunOption {
+  NONE = 0,
+  MODULE = 1 << 0,
+  CGSCC = 1 << 1,
+  ALL = MODULE | CGSCC
 };
 
 } // end namespace llvm
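For orientation, here is a minimal sketch (not part of the header) of how an Attributor-based pass might query the two attributes above. It assumes an already-seeded Attributor and a querying attribute; the function name and output are illustrative only.

// Illustrative sketch, assuming the r416183b Attributor API.
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/Attributor.h"

using namespace llvm;

static void inspectValue(Attributor &A, const AbstractAttribute &QueryingAA,
                         Value &V, const Instruction *CtxI) {
  const IRPosition &IRP = IRPosition::value(V);

  // Potential constant integer values of V; the set may still be in its
  // empty "best" state.
  const auto &PVS =
      A.getAAFor<AAPotentialValues>(QueryingAA, IRP, DepClassTy::OPTIONAL);
  if (Optional<ConstantInt *> C = PVS.getAssumedConstantInt(A, CtxI))
    if (*C)
      errs() << "single assumed constant: " << **C << "\n";

  // Whether V is assumed to be neither undef nor poison at this position.
  const auto &NoUndefAA =
      A.getAAFor<AANoUndef>(QueryingAA, IRP, DepClassTy::OPTIONAL);
  if (NoUndefAA.isAssumedNoUndef())
    errs() << "assumed noundef\n";
}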
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/BlockExtractor.h b/linux-x64/clang/include/llvm/Transforms/IPO/BlockExtractor.h
new file mode 100644
index 0000000..deeb5eb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/BlockExtractor.h
@@ -0,0 +1,25 @@
+//===- BlockExtractor.h - Extracts blocks into their own functions --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass extracts the specified basic blocks from the module into their
+// own functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H
+#define LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct BlockExtractorPass : PassInfoMixin<BlockExtractorPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
index c2626d0..7826337 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
@@ -19,7 +19,6 @@
 #ifndef LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
 #define LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
 
-#include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
index 8440df6..d34a510 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
@@ -14,7 +14,6 @@
 #ifndef LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
 #define LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
 
-#include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
index 73797bc..496ceea 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
@@ -128,6 +128,7 @@
   Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
 
   void SurveyFunction(const Function &F);
+  bool IsLive(const RetOrArg &RA);
   void MarkValue(const RetOrArg &RA, Liveness L,
                  const UseVector &MaybeLiveUses);
   void MarkLive(const RetOrArg &RA);
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
index 7379009..fd99843 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
@@ -13,7 +13,6 @@
 #ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
 #define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
 
-#include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
index bbf270c..6eaf82a 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
@@ -98,15 +98,17 @@
   using ImportMapTy = StringMap<FunctionsToImportTy>;
 
   /// The set contains an entry for every global value the module exports.
-  using ExportSetTy = std::unordered_set<GlobalValue::GUID>;
+  using ExportSetTy = DenseSet<ValueInfo>;
 
   /// A function of this type is used to load modules referenced by the index.
   using ModuleLoaderTy =
       std::function<Expected<std::unique_ptr<Module>>(StringRef Identifier)>;
 
   /// Create a Function Importer.
-  FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader)
-      : Index(Index), ModuleLoader(std::move(ModuleLoader)) {}
+  FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader,
+                   bool ClearDSOLocalOnDeclarations)
+      : Index(Index), ModuleLoader(std::move(ModuleLoader)),
+        ClearDSOLocalOnDeclarations(ClearDSOLocalOnDeclarations) {}
 
   /// Import functions in Module \p M based on the supplied import list.
   Expected<bool> importFunctions(Module &M, const ImportMapTy &ImportList);
@@ -117,6 +119,10 @@
 
   /// Factory function to load a Module for a given identifier
   ModuleLoaderTy ModuleLoader;
+
+  /// See the comment of ClearDSOLocalOnDeclarations in
+  /// Utils/FunctionImportUtils.h.
+  bool ClearDSOLocalOnDeclarations;
 };
 
 /// The function importing pass
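A short sketch of how a ThinLTO backend might drive the updated constructor. The loader callback and the ClearDSOLocalOnDeclarations choice are assumptions; see Utils/FunctionImportUtils.h for the actual criteria.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/FunctionImport.h"

using namespace llvm;

static Expected<bool>
importForModule(Module &M, const ModuleSummaryIndex &Index,
                const FunctionImporter::ImportMapTy &ImportList,
                FunctionImporter::ModuleLoaderTy Loader) {
  // Assumption: declarations keep dso_local here; flip this according to
  // the criteria described in Utils/FunctionImportUtils.h.
  FunctionImporter Importer(Index, std::move(Loader),
                            /*ClearDSOLocalOnDeclarations=*/false);
  return Importer.importFunctions(M, ImportList);
}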
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
index c434484..0a68518 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
@@ -43,11 +43,25 @@
   /// Comdat -> Globals in that Comdat section.
   std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers;
 
+  /// !type metadata -> set of (vtable, offset) pairs
+  DenseMap<Metadata *, SmallSet<std::pair<GlobalVariable *, uint64_t>, 4>>
+      TypeIdMap;
+
+  // Global variables which are vtables, and which we have enough information
+  // about to safely do dead virtual function elimination.
+  SmallPtrSet<GlobalValue *, 32> VFESafeVTables;
+
   void UpdateGVDependencies(GlobalValue &GV);
   void MarkLive(GlobalValue &GV,
                 SmallVectorImpl<GlobalValue *> *Updates = nullptr);
   bool RemoveUnusedGlobalValue(GlobalValue &GV);
 
+  // Dead virtual function elimination.
+  void AddVirtualFunctionDependencies(Module &M);
+  void ScanVTables(Module &M);
+  void ScanTypeCheckedLoadIntrinsics(Module &M);
+  void ScanVTableLoad(Function *Caller, Metadata *TypeId, uint64_t CallOffset);
+
   void ComputeDependencies(Value *V, SmallPtrSetImpl<GlobalValue *> &U);
 };
 
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/HotColdSplitting.h b/linux-x64/clang/include/llvm/Transforms/IPO/HotColdSplitting.h
index 7366884..8c3049f 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/HotColdSplitting.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/HotColdSplitting.h
@@ -17,6 +17,45 @@
 namespace llvm {
 
 class Module;
+class ProfileSummaryInfo;
+class BlockFrequencyInfo;
+class TargetTransformInfo;
+class OptimizationRemarkEmitter;
+class AssumptionCache;
+class DominatorTree;
+class CodeExtractorAnalysisCache;
+
+/// A sequence of basic blocks.
+///
+/// A 0-sized SmallVector is slightly cheaper to move than a std::vector.
+using BlockSequence = SmallVector<BasicBlock *, 0>;
+
+class HotColdSplitting {
+public:
+  HotColdSplitting(ProfileSummaryInfo *ProfSI,
+                   function_ref<BlockFrequencyInfo *(Function &)> GBFI,
+                   function_ref<TargetTransformInfo &(Function &)> GTTI,
+                   std::function<OptimizationRemarkEmitter &(Function &)> *GORE,
+                   function_ref<AssumptionCache *(Function &)> LAC)
+      : PSI(ProfSI), GetBFI(GBFI), GetTTI(GTTI), GetORE(GORE), LookupAC(LAC) {}
+  bool run(Module &M);
+
+private:
+  bool isFunctionCold(const Function &F) const;
+  bool shouldOutlineFrom(const Function &F) const;
+  bool outlineColdRegions(Function &F, bool HasProfileSummary);
+  Function *extractColdRegion(const BlockSequence &Region,
+                              const CodeExtractorAnalysisCache &CEAC,
+                              DominatorTree &DT, BlockFrequencyInfo *BFI,
+                              TargetTransformInfo &TTI,
+                              OptimizationRemarkEmitter &ORE,
+                              AssumptionCache *AC, unsigned Count);
+  ProfileSummaryInfo *PSI;
+  function_ref<BlockFrequencyInfo *(Function &)> GetBFI;
+  function_ref<TargetTransformInfo &(Function &)> GetTTI;
+  std::function<OptimizationRemarkEmitter &(Function &)> *GetORE;
+  function_ref<AssumptionCache *(Function &)> LookupAC;
+};
 
 /// Pass to outline cold regions.
 class HotColdSplittingPass : public PassInfoMixin<HotColdSplittingPass> {
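A sketch of wiring the analyses into HotColdSplitting under the new pass manager, loosely mirroring what HotColdSplittingPass::run is expected to do; the driver function name is illustrative, while the analysis IDs are the standard LLVM ones.

// Illustrative sketch, not part of this header.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/IPO/HotColdSplitting.h"

using namespace llvm;

static bool splitColdCode(Module &M, FunctionAnalysisManager &FAM,
                          ProfileSummaryInfo *PSI) {
  auto GBFI = [&](Function &F) -> BlockFrequencyInfo * {
    return &FAM.getResult<BlockFrequencyAnalysis>(F);
  };
  auto GTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  std::function<OptimizationRemarkEmitter &(Function &)> GORE =
      [&](Function &F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  };
  auto LAC = [&](Function &F) -> AssumptionCache * {
    return &FAM.getResult<AssumptionAnalysis>(F);
  };
  return HotColdSplitting(PSI, GBFI, GTTI, &GORE, LAC).run(M);
}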
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/IROutliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/IROutliner.h
new file mode 100644
index 0000000..0346803
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/IROutliner.h
@@ -0,0 +1,357 @@
+//===- IROutliner.h - Extract similar IR regions into functions ------------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// The interface file for the IROutliner which is used by the IROutliner Pass.
+//
+// The outliner uses the IRSimilarityIdentifier to identify the similar regions
+// of code.  It evaluates each set of IRSimilarityCandidates with an estimate of
+// whether it will provide code size reduction.  Each region is extracted using
+// the code extractor.  These extracted functions are consolidated into a single
+// function and called from the extracted call site.
+//
+// For example:
+// \code
+//   %1 = add i32 %a, %b
+//   %2 = add i32 %b, %a
+//   %3 = add i32 %b, %a
+//   %4 = add i32 %a, %b
+// \endcode
+// would become the function
+// \code
+// define internal void outlined_ir_function(i32 %0, i32 %1) {
+//   %2 = add i32 %0, %1
+//   %3 = add i32 %1, %0
+//   ret void
+// }
+// \endcode
+// with calls:
+// \code
+//   call void outlined_ir_function(i32 %a, i32 %b)
+//   call void outlined_ir_function(i32 %b, i32 %a)
+// \endcode
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_IROUTLINER_H
+#define LLVM_TRANSFORMS_IPO_IROUTLINER_H
+
+#include "llvm/Analysis/IRSimilarityIdentifier.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Transforms/Utils/CodeExtractor.h"
+#include <set>
+
+struct OutlinableGroup;
+
+namespace llvm {
+using namespace IRSimilarity;
+
+class Module;
+class TargetTransformInfo;
+class OptimizationRemarkEmitter;
+
+/// The OutlinableRegion holds all the information for a specific region, or
+/// sequence of instructions. This includes what values need to be hoisted to
+/// arguments from the extracted function, inputs and outputs to the region, and
+/// mapping from the extracted function arguments to overall function arguments.
+struct OutlinableRegion {
+  /// Describes the region of code.
+  IRSimilarityCandidate *Candidate;
+
+  /// If this region is outlined, the front and back IRInstructionData could
+  /// potentially become invalidated if the only new instruction is a call.
+  /// This ensures that we replace the instruction in the IRInstructionData.
+  IRInstructionData *NewFront = nullptr;
+  IRInstructionData *NewBack = nullptr;
+
+  /// The number of extracted inputs from the CodeExtractor.
+  unsigned NumExtractedInputs;
+
+  /// The corresponding BasicBlock with the appropriate stores for this
+  /// OutlinableRegion in the overall function.
+  unsigned OutputBlockNum;
+
+  /// Mapping the extracted argument number to the argument number in the
+  /// overall function.  Since there will be inputs, such as elevated constants
+  /// that are not the same in each region in a SimilarityGroup, or values that
+  /// cannot be sunk into the extracted section in every region, we must keep
+  /// track of which extracted argument maps to which overall argument.
+  DenseMap<unsigned, unsigned> ExtractedArgToAgg;
+  DenseMap<unsigned, unsigned> AggArgToExtracted;
+
+  /// Mapping of the argument number in the deduplicated function
+  /// to a given constant, which is used when creating the arguments to the call
+  /// to the newly created deduplicated function.  This is handled separately
+  /// since the CodeExtractor does not recognize constants.
+  DenseMap<unsigned, Constant *> AggArgToConstant;
+
+  /// The global value numbers that are used as outputs for this section. Once
+  /// extracted, each output will be stored to an output register.  This
+  /// documents the global value numbers that are used in this pattern.
+  SmallVector<unsigned, 4> GVNStores;
+
+  /// Used to create an outlined function.
+  CodeExtractor *CE = nullptr;
+
+  /// The call site of the extracted region.
+  CallInst *Call = nullptr;
+
+  /// The function for the extracted region.
+  Function *ExtractedFunction = nullptr;
+
+  /// Flag for whether we have split out the IRSimilarityCandidate. That is,
+  /// whether the region containing the IRSimilarityCandidate has been split
+  /// into its own BasicBlock.
+  bool CandidateSplit = false;
+
+  /// Flag for whether we should not consider this region for extraction.
+  bool IgnoreRegion = false;
+
+  /// The BasicBlock that is before the start of the region BasicBlock,
+  /// only defined when the region has been split.
+  BasicBlock *PrevBB = nullptr;
+
+  /// The BasicBlock that contains the starting instruction of the region.
+  BasicBlock *StartBB = nullptr;
+
+  /// The BasicBlock that contains the ending instruction of the region.
+  BasicBlock *EndBB = nullptr;
+
+  /// The BasicBlock that is after the end of the region BasicBlock,
+  /// only defined when the region has been split.
+  BasicBlock *FollowBB = nullptr;
+
+  /// The Outlinable Group that contains this region and structurally similar
+  /// regions to this region.
+  OutlinableGroup *Parent = nullptr;
+
+  OutlinableRegion(IRSimilarityCandidate &C, OutlinableGroup &Group)
+      : Candidate(&C), Parent(&Group) {
+    StartBB = C.getStartBB();
+    EndBB = C.getEndBB();
+  }
+
+  /// For the contained region, split the parent BasicBlock at the starting and
+  /// ending instructions of the contained IRSimilarityCandidate.
+  void splitCandidate();
+
+  /// For the contained region, reattach the BasicBlock at the starting and
+  /// ending instructions of the contained IRSimilarityCandidate, or if the
+  /// function has been extracted, the start and end of the BasicBlock
+  /// containing the called function.
+  void reattachCandidate();
+
+  /// Get the size of the code removed from the region.
+  ///
+  /// \param [in] TTI - The TargetTransformInfo for the parent function.
+  /// \returns the code size of the region
+  unsigned getBenefit(TargetTransformInfo &TTI);
+};
+
+/// This class is a pass that identifies similarity in a Module, extracts
+/// instances of the similarity, and then consolidates the similar regions
+/// in an effort to reduce code size.  It uses the IRSimilarityIdentifier pass
+/// to identify the similar regions of code, and then extracts the similar
+/// sections into a single function.  See the above for an example as to
+/// how code is extracted and consolidated into a single function.
+class IROutliner {
+public:
+  IROutliner(function_ref<TargetTransformInfo &(Function &)> GTTI,
+             function_ref<IRSimilarityIdentifier &(Module &)> GIRSI,
+             function_ref<OptimizationRemarkEmitter &(Function &)> GORE)
+      : getTTI(GTTI), getIRSI(GIRSI), getORE(GORE) {}
+  bool run(Module &M);
+
+private:
+  /// Find repeated similar code sequences in \p M and outline them into new
+  /// Functions.
+  ///
+  /// \param [in] M - The module to outline from.
+  /// \returns The number of Functions created.
+  unsigned doOutline(Module &M);
+
+  /// Remove all the IRSimilarityCandidates from \p CandidateVec that have
+  /// instructions contained in a previously outlined region and put the
+  /// remaining regions in \p CurrentGroup.
+  ///
+  /// \param [in] CandidateVec - List of similarity candidates for regions with
+  /// the same similarity structure.
+  /// \param [in,out] CurrentGroup - Contains the potential sections to
+  /// be outlined.
+  void
+  pruneIncompatibleRegions(std::vector<IRSimilarityCandidate> &CandidateVec,
+                           OutlinableGroup &CurrentGroup);
+
+  /// Create the function based on the overall types found in the current
+  /// regions being outlined.
+  ///
+  /// \param M - The module to outline from.
+  /// \param [in,out] CG - The OutlinableGroup for the regions to be outlined.
+  /// \param [in] FunctionNameSuffix - The number of functions previously
+  /// created, used to give the new function a unique suffixed name.
+  /// \returns the newly created function.
+  Function *createFunction(Module &M, OutlinableGroup &CG,
+                           unsigned FunctionNameSuffix);
+
+  /// Identify the needed extracted inputs in a section, and add to the overall
+  /// function if needed.
+  ///
+  /// \param [in] M - The module to outline from.
+  /// \param [in,out] Region - The region to be extracted.
+  /// \param [in] NotSame - The global value numbers of the Values in the region
+  /// that do not have the same Constant in each structurally similar region.
+  void findAddInputsOutputs(Module &M, OutlinableRegion &Region,
+                            DenseSet<unsigned> &NotSame);
+
+  /// Find the number of instructions that will be removed by extracting the
+  /// OutlinableRegions in \p CurrentGroup.
+  ///
+  /// \param [in] CurrentGroup - The collection of OutlinableRegions to be
+  /// analyzed.
+  /// \returns the number of outlined instructions across all regions.
+  unsigned findBenefitFromAllRegions(OutlinableGroup &CurrentGroup);
+
+  /// Find the number of instructions that will be added by reloading arguments.
+  ///
+  /// \param [in] CurrentGroup - The collection of OutlinableRegions to be
+  /// analyzed.
+  /// \returns the number of added reload instructions across all regions.
+  unsigned findCostOutputReloads(OutlinableGroup &CurrentGroup);
+
+  /// Find the cost and the benefit of \p CurrentGroup and save it back to
+  /// \p CurrentGroup.
+  ///
+  /// \param [in] M - The module being analyzed
+  /// \param [in,out] CurrentGroup - The overall outlined section
+  void findCostBenefit(Module &M, OutlinableGroup &CurrentGroup);
+
+  /// Update the output mapping based on the load instruction, and the outputs
+  /// of the extracted function.
+  ///
+  /// \param Region - The region extracted
+  /// \param Outputs - The outputs from the extracted function.
+  /// \param LI - The load instruction used to update the mapping.
+  void updateOutputMapping(OutlinableRegion &Region,
+                           ArrayRef<Value *> Outputs, LoadInst *LI);
+
+  /// Extract \p Region into its own function.
+  ///
+  /// \param [in] Region - The region to be extracted into its own function.
+  /// \returns True if it was successfully outlined.
+  bool extractSection(OutlinableRegion &Region);
+
+  /// For the similarities found, and the extracted sections, create a single
+  /// outlined function with appropriate output blocks as necessary.
+  ///
+  /// \param [in] M - The module to outline from
+  /// \param [in] CurrentGroup - The set of extracted sections to consolidate.
+  /// \param [in,out] FuncsToRemove - List of functions to remove from the
+  /// module after outlining is completed.
+  /// \param [in,out] OutlinedFunctionNum - the number of new outlined
+  /// functions.
+  void deduplicateExtractedSections(Module &M, OutlinableGroup &CurrentGroup,
+                                    std::vector<Function *> &FuncsToRemove,
+                                    unsigned &OutlinedFunctionNum);
+
+  /// If true, enables us to outline from functions that have LinkOnceODR
+  /// linkage.
+  bool OutlineFromLinkODRs = false;
+
+  /// If false, we do not worry if the cost is greater than the benefit.  This
+  /// is for debugging and testing, so that we can test small cases to ensure
+  /// that the outlining is being done correctly.
+  bool CostModel = true;
+
+  /// The set of outlined Instructions, identified by their location in the
+  /// sequential ordering of instructions in a Module.
+  DenseSet<unsigned> Outlined;
+
+  /// TargetTransformInfo lambda for target specific information.
+  function_ref<TargetTransformInfo &(Function &)> getTTI;
+
+  /// A mapping from newly created reloaded output values to the original value.
+  /// If a value is replaced by an output from an outlined region, this maps
+  /// that Value back to its original Value.
+  DenseMap<Value *, Value *> OutputMappings;
+
+  /// IRSimilarityIdentifier lambda to retrieve IRSimilarityIdentifier.
+  function_ref<IRSimilarityIdentifier &(Module &)> getIRSI;
+
+  /// The optimization remark emitter for the pass.
+  function_ref<OptimizationRemarkEmitter &(Function &)> getORE;
+
+  /// The memory allocator used to allocate the CodeExtractors.
+  SpecificBumpPtrAllocator<CodeExtractor> ExtractorAllocator;
+
+  /// The memory allocator used to allocate the OutlinableRegions.
+  SpecificBumpPtrAllocator<OutlinableRegion> RegionAllocator;
+
+  /// The memory allocator used to allocate new IRInstructionData.
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+
+  /// Custom InstVisitor to classify instructions by whether they can be
+  /// analyzed for similarity.  This is needed as there may be instructions we
+  /// can identify as similar, but that are more complicated to outline.
+  struct InstructionAllowed : public InstVisitor<InstructionAllowed, bool> {
+    InstructionAllowed() {}
+
+    // TODO: Determine a scheme to resolve when the label is similar enough.
+    bool visitBranchInst(BranchInst &BI) { return false; }
+    // TODO: Determine a scheme to resolve when the labels are similar enough.
+    bool visitPHINode(PHINode &PN) { return false; }
+    // TODO: Handle allocas.
+    bool visitAllocaInst(AllocaInst &AI) { return false; }
+    // VAArg instructions are not allowed since this could cause difficulty when
+    // differentiating between different sets of variable instructions in
+    // the deduplicated outlined regions.
+    bool visitVAArgInst(VAArgInst &VI) { return false; }
+    // We exclude all exception handling cases since they are so context
+    // dependent.
+    bool visitLandingPadInst(LandingPadInst &LPI) { return false; }
+    bool visitFuncletPadInst(FuncletPadInst &FPI) { return false; }
+    // DebugInfo should be included in the regions, but should not be
+    // analyzed for similarity as it has no bearing on the outcome of the
+    // program.
+    bool visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return true; }
+    // TODO: Handle specific intrinsics individually from those that can be
+    // handled.
+    bool visitIntrinsicInst(IntrinsicInst &II) { return false; }
+    // We only handle CallInsts that are not indirect, since we cannot guarantee
+    // that they have a name in these cases.
+    bool visitCallInst(CallInst &CI) {
+      Function *F = CI.getCalledFunction();
+      if (!F || CI.isIndirectCall() || !F->hasName())
+        return false;
+      return true;
+    }
+    // TODO: Handle FreezeInsts.  Since a frozen value could be frozen inside
+    // the outlined region, and then returned as an output, this will have to be
+    // handled differently.
+    bool visitFreezeInst(FreezeInst &CI) { return false; }
+    // TODO: We do not currently handle similarity that changes the control flow.
+    bool visitInvokeInst(InvokeInst &II) { return false; }
+    // TODO: We do not currently handle similarity that changes the control flow.
+    bool visitCallBrInst(CallBrInst &CBI) { return false; }
+    // TODO: Handle interblock similarity.
+    bool visitTerminator(Instruction &I) { return false; }
+    bool visitInstruction(Instruction &I) { return true; }
+  };
+
+  /// An InstVisitor used to exclude certain instructions from being outlined.
+  InstructionAllowed InstructionClassifier;
+};
+
+/// Pass to outline similar regions.
+class IROutlinerPass : public PassInfoMixin<IROutlinerPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_IROUTLINER_H
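A sketch of how the IROutliner might be driven from a module pass, assuming IRSimilarityAnalysis and the usual function analyses are registered; the driver name is illustrative.

// Illustrative sketch, not part of this header.
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/IPO/IROutliner.h"

using namespace llvm;

static bool outlineSimilarRegions(Module &M, ModuleAnalysisManager &MAM) {
  auto &FAM =
      MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto GTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto GIRSI = [&](Module &Mod) -> IRSimilarityIdentifier & {
    return MAM.getResult<IRSimilarityAnalysis>(Mod);
  };
  auto GORE = [&](Function &F) -> OptimizationRemarkEmitter & {
    return FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  };
  return IROutliner(GTTI, GIRSI, GORE).run(M);
}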
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
index 8202b94..b6e793a 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
@@ -11,9 +11,9 @@
 
 #include "llvm/Analysis/CGSCCPassManager.h"
 #include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/InlineAdvisor.h"
 #include "llvm/Analysis/InlineCost.h"
 #include "llvm/Analysis/LazyCallGraph.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
 #include <utility>
@@ -36,6 +36,8 @@
   /// call the implementation here.
   void getAnalysisUsage(AnalysisUsage &Info) const override;
 
+  using llvm::Pass::doInitialization;
+
   bool doInitialization(CallGraph &CG) override;
 
   /// Main run interface method, this implements the interface required by the
@@ -51,7 +53,7 @@
   /// This method must be implemented by the subclass to determine the cost of
   /// inlining the specified call site.  If the cost returned is greater than
   /// the current inline threshold, the call site is not inlined.
-  virtual InlineCost getInlineCost(CallSite CS) = 0;
+  virtual InlineCost getInlineCost(CallBase &CB) = 0;
 
   /// Remove dead functions.
   ///
@@ -74,6 +76,7 @@
 protected:
   AssumptionCacheTracker *ACT;
   ProfileSummaryInfo *PSI;
+  std::function<const TargetLibraryInfo &(Function &)> GetTLI;
   ImportedFunctionsInliningStatistics ImportedFunctionsStats;
 };
 
@@ -93,21 +96,54 @@
 /// passes be composed to achieve the same end result.
 class InlinerPass : public PassInfoMixin<InlinerPass> {
 public:
-  InlinerPass(InlineParams Params = getInlineParams())
-      : Params(std::move(Params)) {}
+  InlinerPass(bool OnlyMandatory = false) : OnlyMandatory(OnlyMandatory) {}
   ~InlinerPass();
-  InlinerPass(InlinerPass &&Arg)
-      : Params(std::move(Arg.Params)),
-        ImportedFunctionsStats(std::move(Arg.ImportedFunctionsStats)) {}
+  InlinerPass(InlinerPass &&Arg) = default;
 
   PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                         LazyCallGraph &CG, CGSCCUpdateResult &UR);
 
 private:
-  InlineParams Params;
+  InlineAdvisor &getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
+                            FunctionAnalysisManager &FAM, Module &M);
   std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;
+  std::unique_ptr<DefaultInlineAdvisor> OwnedDefaultAdvisor;
+  const bool OnlyMandatory;
 };
 
+/// Module pass, wrapping the inliner pass. This works in conjunction with the
+/// InlineAdvisorAnalysis to facilitate inlining decisions that take
+/// module-wide state into account and need to track inter-inliner pass runs
+/// for a given module. An InlineAdvisor is configured and kept alive for the
+/// duration of the ModuleInlinerWrapperPass::run.
+class ModuleInlinerWrapperPass
+    : public PassInfoMixin<ModuleInlinerWrapperPass> {
+public:
+  ModuleInlinerWrapperPass(
+      InlineParams Params = getInlineParams(), bool Debugging = false,
+      bool MandatoryFirst = true,
+      InliningAdvisorMode Mode = InliningAdvisorMode::Default,
+      unsigned MaxDevirtIterations = 0);
+  ModuleInlinerWrapperPass(ModuleInlinerWrapperPass &&Arg) = default;
+
+  PreservedAnalyses run(Module &, ModuleAnalysisManager &);
+
+  /// Allow adding more CGSCC passes, besides inlining. This should be called
+  /// before run is called, as part of pass pipeline building.
+  CGSCCPassManager &getPM() { return PM; }
+
+  /// Allow adding module-level analyses benefiting the contained CGSCC passes.
+  template <class T> void addRequiredModuleAnalysis() {
+    MPM.addPass(RequireAnalysisPass<T, Module>());
+  }
+
+private:
+  const InlineParams Params;
+  const InliningAdvisorMode Mode;
+  const unsigned MaxDevirtIterations;
+  CGSCCPassManager PM;
+  ModulePassManager MPM;
+};
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_IPO_INLINER_H
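A sketch of scheduling the new wrapper pass; getInlineParams() uses its defaults here, and the commented-out CGSCC pass is a hypothetical placeholder.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/Inliner.h"

using namespace llvm;

static void addInlining(ModulePassManager &MPM) {
  ModuleInlinerWrapperPass MIWP(getInlineParams());
  // Extra CGSCC passes share the same CGSCC walk as the inliner:
  // MIWP.getPM().addPass(MyCGSCCPass());  // hypothetical pass
  MPM.addPass(std::move(MIWP));
}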
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/LoopExtractor.h b/linux-x64/clang/include/llvm/Transforms/IPO/LoopExtractor.h
new file mode 100644
index 0000000..def3c59
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/LoopExtractor.h
@@ -0,0 +1,32 @@
+//===- LoopExtractor.h - Extract each loop into a new function ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A pass wrapper around the ExtractLoop() scalar transformation to extract each
+// top-level loop into its own new function. If the loop is the ONLY loop in a
+// given function, it is not touched. This is a pass most useful for debugging
+// via bugpoint.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H
+#define LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LoopExtractorPass : public PassInfoMixin<LoopExtractorPass> {
+  LoopExtractorPass(unsigned NumLoops = ~0) : NumLoops(NumLoops) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  unsigned NumLoops;
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
index 39b23f5..eb682c4 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
@@ -193,15 +193,24 @@
                 uint64_t &AllocByteOffset, uint8_t &AllocMask);
 };
 
+bool isJumpTableCanonical(Function *F);
+
 } // end namespace lowertypetests
 
 class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
+  bool UseCommandLine = false;
+
+  ModuleSummaryIndex *ExportSummary = nullptr;
+  const ModuleSummaryIndex *ImportSummary = nullptr;
+  bool DropTypeTests = true;
+
 public:
-  ModuleSummaryIndex *ExportSummary;
-  const ModuleSummaryIndex *ImportSummary;
+  LowerTypeTestsPass() : UseCommandLine(true) {}
   LowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
-                     const ModuleSummaryIndex *ImportSummary)
-      : ExportSummary(ExportSummary), ImportSummary(ImportSummary) {}
+                     const ModuleSummaryIndex *ImportSummary,
+                     bool DropTypeTests = false)
+      : ExportSummary(ExportSummary), ImportSummary(ImportSummary),
+        DropTypeTests(DropTypeTests) {}
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 };
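A sketch of the two ways the reworked constructors can be used in an LTO backend; which summary pointers are non-null depends on the LTO phase, and the function name is illustrative.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/LowerTypeTests.h"

using namespace llvm;

static void scheduleLowerTypeTests(ModulePassManager &MPM,
                                   ModuleSummaryIndex *ExportSummary,
                                   const ModuleSummaryIndex *ImportSummary) {
  // LTO backend: lower llvm.type.test using the combined summaries.
  MPM.addPass(LowerTypeTestsPass(ExportSummary, ImportSummary));
  // Alternatively, when CFI is not in use, only strip the type tests:
  // MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, /*DropTypeTests=*/true));
}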
 
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/MergeFunctions.h b/linux-x64/clang/include/llvm/Transforms/IPO/MergeFunctions.h
new file mode 100644
index 0000000..822f0fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/MergeFunctions.h
@@ -0,0 +1,32 @@
+//===- MergeFunctions.h - Merge Identical Functions -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass looks for equivalent functions that are mergeable and folds them,
+// replacing uses of the duplicates with uses of a single canonical copy in
+// order to reduce code size.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H
+#define LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Merge identical functions.
+class MergeFunctionsPass : public PassInfoMixin<MergeFunctionsPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/OpenMPOpt.h b/linux-x64/clang/include/llvm/Transforms/IPO/OpenMPOpt.h
new file mode 100644
index 0000000..9b72ee0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/OpenMPOpt.h
@@ -0,0 +1,76 @@
+//===- IPO/OpenMPOpt.h - Collection of OpenMP optimizations -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_OPENMP_OPT_H
+#define LLVM_TRANSFORMS_IPO_OPENMP_OPT_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+namespace omp {
+
+/// Summary of a kernel (=entry point for target offloading).
+using Kernel = Function *;
+
+/// Helper to remember if the module contains OpenMP (runtime calls), to be used
+/// foremost with containsOpenMP.
+struct OpenMPInModule {
+  OpenMPInModule &operator=(bool Found) {
+    if (Found)
+      Value = OpenMPInModule::OpenMP::FOUND;
+    else
+      Value = OpenMPInModule::OpenMP::NOT_FOUND;
+    return *this;
+  }
+  bool isKnown() { return Value != OpenMP::UNKNOWN; }
+  operator bool() { return Value != OpenMP::NOT_FOUND; }
+
+  /// Does this function \p F contain any OpenMP runtime calls?
+  bool containsOMPRuntimeCalls(Function *F) const {
+    return FuncsWithOMPRuntimeCalls.contains(F);
+  }
+
+  /// Return the known kernels (=GPU entry points) in the module.
+  SmallPtrSetImpl<Kernel> &getKernels() { return Kernels; }
+
+  /// Identify kernels in the module and populate the Kernels set.
+  void identifyKernels(Module &M);
+
+private:
+  enum class OpenMP { FOUND, NOT_FOUND, UNKNOWN } Value = OpenMP::UNKNOWN;
+
+  friend bool containsOpenMP(Module &M, OpenMPInModule &OMPInModule);
+
+  /// In which functions are OpenMP runtime calls present?
+  SmallPtrSet<Function *, 32> FuncsWithOMPRuntimeCalls;
+
+  /// Collection of known kernels (=GPU entry points) in the module.
+  SmallPtrSet<Kernel, 8> Kernels;
+};
+
+/// Helper to determine if \p M contains OpenMP (runtime calls).
+bool containsOpenMP(Module &M, OpenMPInModule &OMPInModule);
+
+} // namespace omp
+
+/// OpenMP optimizations pass.
+class OpenMPOptPass : public PassInfoMixin<OpenMPOptPass> {
+  /// Helper to remember if the module contains OpenMP (runtime calls).
+  omp::OpenMPInModule OMPInModule;
+
+public:
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_OPENMP_OPT_H
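A sketch of gating work on containsOpenMP and walking the detected kernels; illustrative only.

// Illustrative sketch, not part of this header.
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/OpenMPOpt.h"

using namespace llvm;

static void listOpenMPKernels(Module &M) {
  omp::OpenMPInModule OMPInModule;
  if (!omp::containsOpenMP(M, OMPInModule))
    return; // No OpenMP runtime calls in this module.
  for (omp::Kernel K : OMPInModule.getKernels())
    errs() << "offload kernel: " << K->getName() << "\n";
}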
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 63ff00a..a9928c3 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
 #define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
 
+#include "llvm-c/Transforms/PassManagerBuilder.h"
 #include <functional>
 #include <memory>
 #include <string>
@@ -62,6 +63,8 @@
   typedef std::function<void(const PassManagerBuilder &Builder,
                              legacy::PassManagerBase &PM)>
       ExtensionFn;
+  typedef int GlobalExtensionID;
+
   enum ExtensionPointTy {
     /// EP_EarlyAsPossible - This extension point allows adding passes before
     /// any other transformations, allowing them to see the code as it is coming
@@ -153,6 +156,7 @@
 
   bool DisableTailCalls;
   bool DisableUnrollLoops;
+  bool CallGraphProfile;
   bool SLPVectorize;
   bool LoopVectorize;
   bool LoopsInterleaved;
@@ -193,7 +197,17 @@
   /// Adds an extension that will be used by all PassManagerBuilder instances.
   /// This is intended to be used by plugins, to register a set of
   /// optimisations to run automatically.
-  static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+  ///
+  /// \returns A global extension identifier that can be used to remove the
+  /// extension.
+  static GlobalExtensionID addGlobalExtension(ExtensionPointTy Ty,
+                                              ExtensionFn Fn);
+  /// Removes an extension that was previously added using addGlobalExtension.
+  /// This is also intended to be used by plugins, to remove any extension that
+  /// was previously registered before being unloaded.
+  ///
+  /// \param ExtensionID Identifier of the extension to be removed.
+  static void removeGlobalExtension(GlobalExtensionID ExtensionID);
   void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
 
 private:
@@ -204,7 +218,6 @@
   void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
   void addPGOInstrPasses(legacy::PassManagerBase &MPM, bool IsCS);
   void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
-  void addInstructionCombiningPass(legacy::PassManagerBase &MPM) const;
 
 public:
   /// populateFunctionPassManager - This fills in the function pass manager,
@@ -222,12 +235,30 @@
 /// used by optimizer plugins to allow all front ends to transparently use
 /// them.  Create a static instance of this class in your plugin, providing a
 /// private function that the PassManagerBuilder can use to add your passes.
-struct RegisterStandardPasses {
+class RegisterStandardPasses {
+  PassManagerBuilder::GlobalExtensionID ExtensionID;
+
+public:
   RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty,
                          PassManagerBuilder::ExtensionFn Fn) {
-    PassManagerBuilder::addGlobalExtension(Ty, std::move(Fn));
+    ExtensionID = PassManagerBuilder::addGlobalExtension(Ty, std::move(Fn));
+  }
+
+  ~RegisterStandardPasses() {
+    // If the collection holding the global extensions is destroyed after the
+    // plugin is unloaded, the extension has to be removed here. Indeed, the
+    // destructor of the ExtensionFn may reference code in the plugin.
+    PassManagerBuilder::removeGlobalExtension(ExtensionID);
   }
 };
 
+inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
+  return reinterpret_cast<PassManagerBuilder *>(P);
+}
+
+inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
+  return reinterpret_cast<LLVMPassManagerBuilderRef>(P);
+}
+
 } // end namespace llvm
 #endif
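A sketch of the intended plugin usage: a static RegisterStandardPasses object whose destructor now unregisters the extension when the plugin is unloaded. The registered pass is hypothetical.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

using namespace llvm;

namespace {
// The pass added here is hypothetical; a real plugin would add its own.
void addPluginPasses(const PassManagerBuilder &Builder,
                     legacy::PassManagerBase &PM) {
  // PM.add(createMyPluginPass());
}
} // namespace

// The registration object records the extension ID so its destructor can
// call removeGlobalExtension() before the plugin code is unmapped.
static RegisterStandardPasses
    RegisterPlugin(PassManagerBuilder::EP_EarlyAsPossible, addPluginPasses);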
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SampleContextTracker.h b/linux-x64/clang/include/llvm/Transforms/IPO/SampleContextTracker.h
new file mode 100644
index 0000000..5b26001
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SampleContextTracker.h
@@ -0,0 +1,141 @@
+//===- Transforms/IPO/SampleContextTracker.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for context-sensitive profile tracker used
+/// by CSSPGO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H
+#define LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include <list>
+#include <map>
+
+using namespace llvm;
+using namespace sampleprof;
+
+namespace llvm {
+
+// Internal trie representation used for tracking the context tree and sample
+// profiles. The path from the root node to a given node represents the context
+// of that node's profile.
+class ContextTrieNode {
+public:
+  ContextTrieNode(ContextTrieNode *Parent = nullptr,
+                  StringRef FName = StringRef(),
+                  FunctionSamples *FSamples = nullptr,
+                  LineLocation CallLoc = {0, 0})
+      : ParentContext(Parent), FuncName(FName), FuncSamples(FSamples),
+        CallSiteLoc(CallLoc) {}
+  ContextTrieNode *getChildContext(const LineLocation &CallSite,
+                                   StringRef CalleeName);
+  ContextTrieNode *getChildContext(const LineLocation &CallSite);
+  ContextTrieNode *getOrCreateChildContext(const LineLocation &CallSite,
+                                           StringRef CalleeName,
+                                           bool AllowCreate = true);
+
+  ContextTrieNode &moveToChildContext(const LineLocation &CallSite,
+                                      ContextTrieNode &&NodeToMove,
+                                      StringRef ContextStrToRemove,
+                                      bool DeleteNode = true);
+  void removeChildContext(const LineLocation &CallSite, StringRef CalleeName);
+  std::map<uint32_t, ContextTrieNode> &getAllChildContext();
+  const StringRef getFuncName() const;
+  FunctionSamples *getFunctionSamples() const;
+  void setFunctionSamples(FunctionSamples *FSamples);
+  LineLocation getCallSiteLoc() const;
+  ContextTrieNode *getParentContext() const;
+  void setParentContext(ContextTrieNode *Parent);
+  void dump();
+
+private:
+  static uint32_t nodeHash(StringRef ChildName, const LineLocation &Callsite);
+
+  // Map line+discriminator location to child context
+  std::map<uint32_t, ContextTrieNode> AllChildContext;
+
+  // Link to parent context node
+  ContextTrieNode *ParentContext;
+
+  // Function name for current context
+  StringRef FuncName;
+
+  // Function Samples for current context
+  FunctionSamples *FuncSamples;
+
+  // Callsite location in parent context
+  LineLocation CallSiteLoc;
+};
+
+// Profile tracker that manages profiles and its associated context. It
+// provides interfaces used by sample profile loader to query context profile or
+// base profile for given function or location; it also manages context tree
+// manipulation that is needed to accommodate inline decisions so we have
+// accurate post-inline profile for functions. Internally context profiles
+// are organized in a trie, with each node representing profile for specific
+// calling context and the context is identified by path from root to the node.
+class SampleContextTracker {
+public:
+  SampleContextTracker(StringMap<FunctionSamples> &Profiles);
+  // Query context profile for a specific callee with given name at a given
+  // call-site. The full context is identified by location of call instruction.
+  FunctionSamples *getCalleeContextSamplesFor(const CallBase &Inst,
+                                              StringRef CalleeName);
+  // Query context profile for a given location. The full context
+  // is identified by input DILocation.
+  FunctionSamples *getContextSamplesFor(const DILocation *DIL);
+  // Query context profile for a given sample context of a function.
+  FunctionSamples *getContextSamplesFor(const SampleContext &Context);
+  // Query base profile for a given function. A base profile is a merged view
+  // of all context profiles for contexts that are not inlined.
+  FunctionSamples *getBaseSamplesFor(const Function &Func,
+                                     bool MergeContext = true);
+  // Query base profile for a given function by name.
+  FunctionSamples *getBaseSamplesFor(StringRef Name, bool MergeContext);
+  // Mark a context profile as inlined when function is inlined.
+  // This makes sure that inlined context profile will be excluded in
+  // function's base profile.
+  void markContextSamplesInlined(const FunctionSamples *InlinedSamples);
+  // Dump the internal context profile trie.
+  void dump();
+
+private:
+  ContextTrieNode *getContextFor(const DILocation *DIL);
+  ContextTrieNode *getContextFor(const SampleContext &Context);
+  ContextTrieNode *getCalleeContextFor(const DILocation *DIL,
+                                       StringRef CalleeName);
+  ContextTrieNode *getOrCreateContextPath(const SampleContext &Context,
+                                          bool AllowCreate);
+  ContextTrieNode *getTopLevelContextNode(StringRef FName);
+  ContextTrieNode &addTopLevelContextNode(StringRef FName);
+  ContextTrieNode &promoteMergeContextSamplesTree(ContextTrieNode &NodeToPromo);
+  void promoteMergeContextSamplesTree(const Instruction &Inst,
+                                      StringRef CalleeName);
+  void mergeContextNode(ContextTrieNode &FromNode, ContextTrieNode &ToNode,
+                        StringRef ContextStrToRemove);
+  ContextTrieNode &promoteMergeContextSamplesTree(ContextTrieNode &FromNode,
+                                                  ContextTrieNode &ToNodeParent,
+                                                  StringRef ContextStrToRemove);
+
+  // Map from function name to context profiles (excluding base profile)
+  StringMap<SmallSet<FunctionSamples *, 16>> FuncToCtxtProfileSet;
+
+  // Root node for context trie tree
+  ContextTrieNode RootContext;
+};
+
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H
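A sketch of how an inliner integration might consume the tracker; the Tracker is assumed to have been built from the profile reader's map, and the helper name is illustrative.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/SampleContextTracker.h"

using namespace llvm;

static void noteInlinedCallSite(SampleContextTracker &Tracker,
                                const CallBase &CB, StringRef CalleeName) {
  if (FunctionSamples *FS =
          Tracker.getCalleeContextSamplesFor(CB, CalleeName))
    // Exclude this context profile from the callee's base profile, now
    // that the call site has been inlined.
    Tracker.markContextSamplesInlined(FS);
}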
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
index a5ad445..3d929b9 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
@@ -24,17 +24,18 @@
 /// The sample profiler data loader pass.
 class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
 public:
-  SampleProfileLoaderPass(std::string File = "", std::string RemappingFile = "",
-                          bool IsThinLTOPreLink = false)
+  SampleProfileLoaderPass(
+      std::string File = "", std::string RemappingFile = "",
+      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
       : ProfileFileName(File), ProfileRemappingFileName(RemappingFile),
-        IsThinLTOPreLink(IsThinLTOPreLink) {}
+        LTOPhase(LTOPhase) {}
 
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
 private:
   std::string ProfileFileName;
   std::string ProfileRemappingFileName;
-  bool IsThinLTOPreLink;
+  ThinOrFullLTOPhase LTOPhase;
 };
 
 } // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfileProbe.h b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfileProbe.h
new file mode 100644
index 0000000..78117fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfileProbe.h
@@ -0,0 +1,106 @@
+//===- Transforms/IPO/SampleProfileProbe.h ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for the pseudo probe implementation for
+/// AutoFDO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H
+#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PseudoProbe.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Target/TargetMachine.h"
+#include <unordered_map>
+
+namespace llvm {
+
+class Module;
+
+using namespace sampleprof;
+using BlockIdMap = std::unordered_map<BasicBlock *, uint32_t>;
+using InstructionIdMap = std::unordered_map<Instruction *, uint32_t>;
+
+enum class PseudoProbeReservedId { Invalid = 0, Last = Invalid };
+
+class PseudoProbeDescriptor {
+  uint64_t FunctionGUID;
+  uint64_t FunctionHash;
+
+public:
+  PseudoProbeDescriptor(uint64_t GUID, uint64_t Hash)
+      : FunctionGUID(GUID), FunctionHash(Hash) {}
+  uint64_t getFunctionGUID() const { return FunctionGUID; }
+  uint64_t getFunctionHash() const { return FunctionHash; }
+};
+
+// This class serves sample counts correlation for SampleProfileLoader by
+// analyzing pseudo probes and their function descriptors injected by
+// SampleProfileProber.
+class PseudoProbeManager {
+  DenseMap<uint64_t, PseudoProbeDescriptor> GUIDToProbeDescMap;
+
+  const PseudoProbeDescriptor *getDesc(const Function &F) const;
+
+public:
+  PseudoProbeManager(const Module &M);
+  bool moduleIsProbed(const Module &M) const;
+  bool profileIsValid(const Function &F, const FunctionSamples &Samples) const;
+};
+
+/// Sample profile pseudo prober.
+///
+/// Insert pseudo probes for block sampling and value sampling.
+class SampleProfileProber {
+public:
+  // Pass an empty module id when the prober is not used for instrumentation.
+  SampleProfileProber(Function &F, const std::string &CurModuleUniqueId);
+  void instrumentOneFunc(Function &F, TargetMachine *TM);
+
+private:
+  Function *getFunction() const { return F; }
+  uint64_t getFunctionHash() const { return FunctionHash; }
+  uint32_t getBlockId(const BasicBlock *BB) const;
+  uint32_t getCallsiteId(const Instruction *Call) const;
+  void computeCFGHash();
+  void computeProbeIdForBlocks();
+  void computeProbeIdForCallsites();
+
+  Function *F;
+
+  /// The current module ID that is used to name a static object as a comdat
+  /// group.
+  std::string CurModuleUniqueId;
+
+  /// A CFG hash code used to identify function code changes.
+  uint64_t FunctionHash;
+
+  /// Map basic blocks to their pseudo probe ids.
+  BlockIdMap BlockProbeIds;
+
+  /// Map indirect calls to their pseudo probe ids.
+  InstructionIdMap CallProbeIds;
+
+  /// The ID of the last probe; can be used to number a new probe.
+  uint32_t LastProbeId;
+};
+
+class SampleProfileProbePass : public PassInfoMixin<SampleProfileProbePass> {
+  TargetMachine *TM;
+
+public:
+  SampleProfileProbePass(TargetMachine *TM) : TM(TM) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H
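A sketch of adding the probe pass to a module pipeline; whether TM may be null depends on the embedding pipeline.

// Illustrative sketch, not part of this header.
#include "llvm/Transforms/IPO/SampleProfileProbe.h"

using namespace llvm;

static void addPseudoProbes(ModulePassManager &MPM, TargetMachine *TM) {
  // The pass forwards TM to SampleProfileProber::instrumentOneFunc().
  MPM.addPass(SampleProfileProbePass(TM));
}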
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/StripSymbols.h b/linux-x64/clang/include/llvm/Transforms/IPO/StripSymbols.h
new file mode 100644
index 0000000..dd76d48
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/StripSymbols.h
@@ -0,0 +1,47 @@
+//===- StripSymbols.h - Strip symbols and debug info from a module --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The StripSymbols transformation implements code stripping. Specifically, it
+// can delete:
+//
+//   * names for virtual registers
+//   * symbols for internal globals and functions
+//   * debug information
+//
+// Note that this transformation makes code much less readable, so it should
+// only be used in situations where the 'strip' utility would be used, such as
+// reducing code size or making it harder to reverse engineer code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H
+#define LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct StripSymbolsPass : PassInfoMixin<StripSymbolsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+struct StripNonDebugSymbolsPass : PassInfoMixin<StripNonDebugSymbolsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+struct StripDebugDeclarePass : PassInfoMixin<StripDebugDeclarePass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+struct StripDeadDebugInfoPass : PassInfoMixin<StripDeadDebugInfoPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
index 0b3ba86..0637d62 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
@@ -1,13 +1,17 @@
+//=- SyntheticCountsPropagation.h - Propagate function counts -----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
 #ifndef LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
 #define LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
 
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/PassManager.h"
-#include "llvm/Support/ScaledNumber.h"
 
 namespace llvm {
-class Function;
 class Module;
 
 class SyntheticCountsPropagation
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
index 509fcc8..6e92f8f 100644
--- a/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -16,8 +16,10 @@
 
 #include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
 #include <cassert>
 #include <cstdint>
+#include <set>
 #include <utility>
 #include <vector>
 
@@ -28,6 +30,7 @@
 class Function;
 class GlobalVariable;
 class ModuleSummaryIndex;
+struct ValueInfo;
 
 namespace wholeprogramdevirt {
 
@@ -220,6 +223,9 @@
 struct WholeProgramDevirtPass : public PassInfoMixin<WholeProgramDevirtPass> {
   ModuleSummaryIndex *ExportSummary;
   const ModuleSummaryIndex *ImportSummary;
+  bool UseCommandLine = false;
+  WholeProgramDevirtPass()
+      : ExportSummary(nullptr), ImportSummary(nullptr), UseCommandLine(true) {}
   WholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
                          const ModuleSummaryIndex *ImportSummary)
       : ExportSummary(ExportSummary), ImportSummary(ImportSummary) {
@@ -228,6 +234,34 @@
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
 };
 
+struct VTableSlotSummary {
+  StringRef TypeID;
+  uint64_t ByteOffset;
+};
+
+void updateVCallVisibilityInModule(Module &M,
+                                   bool WholeProgramVisibilityEnabledInLTO);
+void updateVCallVisibilityInIndex(ModuleSummaryIndex &Index,
+                                  bool WholeProgramVisibilityEnabledInLTO);
+
+/// Perform index-based whole program devirtualization on the \p Summary
+/// index. Any devirtualized targets used by a type test in another module
+/// are added to the \p ExportedGUIDs set. For any local devirtualized targets
+/// only used within the defining module, the information necessary for
+/// locating the corresponding WPD resolution is recorded for the ValueInfo
+/// in case it is exported by cross module importing (in which case the
+/// devirtualized target name will need adjustment).
+void runWholeProgramDevirtOnIndex(
+    ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
+    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);
+
+/// Call after cross-module importing to update the recorded single impl
+/// devirt target names for any locals that were exported.
+void updateIndexWPDForExports(
+    ModuleSummaryIndex &Summary,
+    function_ref<bool(StringRef, ValueInfo)> isExported,
+    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);
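+
+// Typical thin-LTO sequence (editor's sketch; Index, ExportedGUIDs,
+// IsExported and LocalWPDTargetsMap are caller-provided): devirtualize on
+// the combined index first, decide cross-module imports, then fix up any
+// local targets that ended up exported:
+//   runWholeProgramDevirtOnIndex(Index, ExportedGUIDs, LocalWPDTargetsMap);
+//   // ... cross-module importing ...
+//   updateIndexWPDForExports(Index, IsExported, LocalWPDTargetsMap);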
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
index 8894d96..0ad4f54 100644
--- a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -24,13 +24,13 @@
 
 class InstCombinePass : public PassInfoMixin<InstCombinePass> {
   InstCombineWorklist Worklist;
-  bool ExpensiveCombines;
+  const unsigned MaxIterations;
 
 public:
   static StringRef name() { return "InstCombinePass"; }
 
-  explicit InstCombinePass(bool ExpensiveCombines = true)
-      : ExpensiveCombines(ExpensiveCombines) {}
+  explicit InstCombinePass();
+  explicit InstCombinePass(unsigned MaxIterations);
 
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
@@ -41,15 +41,13 @@
 /// will try to combine all instructions in the function.
 class InstructionCombiningPass : public FunctionPass {
   InstCombineWorklist Worklist;
-  const bool ExpensiveCombines;
+  const unsigned MaxIterations;
 
 public:
   static char ID; // Pass identification, replacement for typeid
 
-  InstructionCombiningPass(bool ExpensiveCombines = true)
-      : FunctionPass(ID), ExpensiveCombines(ExpensiveCombines) {
-    initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
-  }
+  explicit InstructionCombiningPass();
+  explicit InstructionCombiningPass(unsigned MaxIterations);
 
   void getAnalysisUsage(AnalysisUsage &AU) const override;
   bool runOnFunction(Function &F) override;
@@ -67,7 +65,8 @@
 // into:
 //    %Z = add int 2, %X
 //
-FunctionPass *createInstructionCombiningPass(bool ExpensiveCombines = true);
+FunctionPass *createInstructionCombiningPass();
+FunctionPass *createInstructionCombiningPass(unsigned MaxIterations);
 }
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
index 6c33bdb..25aabe1 100644
--- a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -11,6 +11,7 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/Support/Compiler.h"
@@ -24,8 +25,12 @@
 /// InstCombineWorklist - This is the worklist management logic for
 /// InstCombine.
 class InstCombineWorklist {
-  SmallVector<Instruction*, 256> Worklist;
-  DenseMap<Instruction*, unsigned> WorklistMap;
+  SmallVector<Instruction *, 256> Worklist;
+  DenseMap<Instruction *, unsigned> WorklistMap;
+  /// These instructions will be added in reverse order after the current
+  /// combine has finished. This means that these instructions will be visited
+  /// in the order they have been added.
+  SmallSetVector<Instruction *, 16> Deferred;
 
 public:
   InstCombineWorklist() = default;
@@ -33,69 +38,83 @@
   InstCombineWorklist(InstCombineWorklist &&) = default;
   InstCombineWorklist &operator=(InstCombineWorklist &&) = default;
 
-  bool isEmpty() const { return Worklist.empty(); }
+  bool isEmpty() const { return Worklist.empty() && Deferred.empty(); }
 
-  /// Add - Add the specified instruction to the worklist if it isn't already
-  /// in it.
-  void Add(Instruction *I) {
+  /// Add instruction to the worklist.
+  /// Instructions will be visited in the order they are added.
+  /// You likely want to use this method.
+  void add(Instruction *I) {
+    if (Deferred.insert(I))
+      LLVM_DEBUG(dbgs() << "IC: ADD DEFERRED: " << *I << '\n');
+  }
+
+  /// Add value to the worklist if it is an instruction.
+  /// Instructions will be visited in the order they are added.
+  void addValue(Value *V) {
+    if (Instruction *I = dyn_cast<Instruction>(V))
+      add(I);
+  }
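+
+  // Editor's note (illustrative): add() yields FIFO visitation while push()
+  // yields LIFO visitation. After add(A); add(B); the combiner visits A
+  // before B; after push(A); push(B); it visits B first.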
+
+  /// Push the instruction onto the worklist stack.
+  /// Instructions that have been added first will be visited last.
+  void push(Instruction *I) {
+    assert(I);
+    assert(I->getParent() && "Instruction not inserted yet?");
+
     if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
       LLVM_DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
       Worklist.push_back(I);
     }
   }
 
-  void AddValue(Value *V) {
+  void pushValue(Value *V) {
     if (Instruction *I = dyn_cast<Instruction>(V))
-      Add(I);
+      push(I);
   }
 
-  /// AddInitialGroup - Add the specified batch of stuff in reverse order.
-  /// which should only be done when the worklist is empty and when the group
-  /// has no duplicates.
-  void AddInitialGroup(ArrayRef<Instruction *> List) {
-    assert(Worklist.empty() && "Worklist must be empty to add initial group");
-    Worklist.reserve(List.size()+16);
-    WorklistMap.reserve(List.size());
-    LLVM_DEBUG(dbgs() << "IC: ADDING: " << List.size()
-                      << " instrs to worklist\n");
-    unsigned Idx = 0;
-    for (Instruction *I : reverse(List)) {
-      WorklistMap.insert(std::make_pair(I, Idx++));
-      Worklist.push_back(I);
-    }
+  Instruction *popDeferred() {
+    if (Deferred.empty())
+      return nullptr;
+    return Deferred.pop_back_val();
   }
 
-  // Remove - remove I from the worklist if it exists.
-  void Remove(Instruction *I) {
+  void reserve(size_t Size) {
+    Worklist.reserve(Size + 16);
+    WorklistMap.reserve(Size);
+  }
+
+  /// Remove I from the worklist if it exists.
+  void remove(Instruction *I) {
     DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
-    if (It == WorklistMap.end()) return; // Not in worklist.
+    if (It != WorklistMap.end()) {
+      // Don't bother moving everything down, just null out the slot.
+      Worklist[It->second] = nullptr;
+      WorklistMap.erase(It);
+    }
 
-    // Don't bother moving everything down, just null out the slot.
-    Worklist[It->second] = nullptr;
-
-    WorklistMap.erase(It);
+    Deferred.remove(I);
   }
 
-  Instruction *RemoveOne() {
+  Instruction *removeOne() {
+    if (Worklist.empty())
+      return nullptr;
     Instruction *I = Worklist.pop_back_val();
     WorklistMap.erase(I);
     return I;
   }
 
-  /// AddUsersToWorkList - When an instruction is simplified, add all users of
-  /// the instruction to the work lists because they might get more simplified
-  /// now.
-  ///
-  void AddUsersToWorkList(Instruction &I) {
+  /// When an instruction is simplified, add all users of the instruction
+  /// to the work lists because they might get more simplified now.
+  void pushUsersToWorkList(Instruction &I) {
     for (User *U : I.users())
-      Add(cast<Instruction>(U));
+      push(cast<Instruction>(U));
   }
 
 
-  /// Zap - check that the worklist is empty and nuke the backing store for
-  /// the map if it is large.
-  void Zap() {
+  /// Check that the worklist is empty and nuke the backing store for the map.
+  void zap() {
     assert(WorklistMap.empty() && "Worklist empty, but map not?");
+    assert(Deferred.empty() && "Deferred instructions left over");
 
     // Do an explicit clear, this shrinks the map if needed.
     WorklistMap.clear();
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombiner.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombiner.h
new file mode 100644
index 0000000..a5aed72
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -0,0 +1,529 @@
+//===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the interface for the instcombine pass implementation.
+/// The interface is used for generic transformations in this folder and
+/// target specific combinations in the targets.
+/// The visitor implementation is in \c InstCombinerImpl in
+/// \c InstCombineInternal.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
+
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+#include <cassert>
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+class AAResults;
+class AssumptionCache;
+class ProfileSummaryInfo;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The core instruction combiner logic.
+///
+/// This class provides both the logic to recursively visit instructions and
+/// combine them.
+class LLVM_LIBRARY_VISIBILITY InstCombiner {
+  /// Only used to call target specific inst combining.
+  TargetTransformInfo &TTI;
+
+public:
+  /// Maximum size of array considered when transforming.
+  uint64_t MaxArraySizeForCombine = 0;
+
+  /// An IRBuilder that automatically inserts new instructions into the
+  /// worklist.
+  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
+  BuilderTy &Builder;
+
+protected:
+  /// A worklist of the instructions that need to be simplified.
+  InstCombineWorklist &Worklist;
+
+  // Mode in which we are running the combiner.
+  const bool MinimizeSize;
+
+  AAResults *AA;
+
+  // Required analyses.
+  AssumptionCache &AC;
+  TargetLibraryInfo &TLI;
+  DominatorTree &DT;
+  const DataLayout &DL;
+  const SimplifyQuery SQ;
+  OptimizationRemarkEmitter &ORE;
+  BlockFrequencyInfo *BFI;
+  ProfileSummaryInfo *PSI;
+
+  // Optional analyses. When non-null, these can both be used to do better
+  // combining and will be updated to reflect any changes.
+  LoopInfo *LI;
+
+  bool MadeIRChange = false;
+
+public:
+  InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
+               bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
+               TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
+               DominatorTree &DT, OptimizationRemarkEmitter &ORE,
+               BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
+               const DataLayout &DL, LoopInfo *LI)
+      : TTI(TTI), Builder(Builder), Worklist(Worklist),
+        MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
+        SQ(DL, &TLI, &DT, &AC), ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}
+
+  virtual ~InstCombiner() {}
+
+  /// Return the source operand of a potentially bitcasted value while
+  /// optionally checking if it has one use. If there is no bitcast or the one
+  /// use check is not met, return the input value itself.
+  static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
+    if (auto *BitCast = dyn_cast<BitCastInst>(V))
+      if (!OneUseOnly || BitCast->hasOneUse())
+        return BitCast->getOperand(0);
+
+    // V is not a bitcast or V has more than one use and OneUseOnly is true.
+    return V;
+  }
+
+  /// Assign a complexity or rank value to LLVM Values. This is used to reduce
+  /// the amount of pattern matching needed for compares and commutative
+  /// instructions. For example, if we have:
+  ///   icmp ugt X, Constant
+  /// or
+  ///   xor (add X, Constant), cast Z
+  ///
+  /// We do not have to consider the commuted variants of these patterns because
+  /// canonicalization based on complexity guarantees the above ordering.
+  ///
+  /// This routine maps IR values to various complexity ranks:
+  ///   0 -> undef
+  ///   1 -> Constants
+  ///   2 -> Other non-instructions
+  ///   3 -> Arguments
+  ///   4 -> Cast and (f)neg/not instructions
+  ///   5 -> Other instructions
+  static unsigned getComplexity(Value *V) {
+    if (isa<Instruction>(V)) {
+      if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
+          match(V, m_Not(PatternMatch::m_Value())) ||
+          match(V, m_FNeg(PatternMatch::m_Value())))
+        return 4;
+      return 5;
+    }
+    if (isa<Argument>(V))
+      return 3;
+    return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
+  }
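+
+  // Illustrative use (editor's sketch): commutative-op canonicalization
+  // orders operands by decreasing complexity, so for an 'add' of an Argument
+  // (rank 3) and a Constant (rank 1):
+  //
+  //   if (getComplexity(Op0) < getComplexity(Op1))
+  //     std::swap(Op0, Op1); // canonical: add <argument>, <constant>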
+
+  /// Predicate canonicalization reduces the number of patterns that need to be
+  /// matched by other transforms. For example, we may swap the operands of a
+  /// conditional branch or select to create a compare with a canonical
+  /// (inverted) predicate which is then more likely to be matched with other
+  /// values.
+  static bool isCanonicalPredicate(CmpInst::Predicate Pred) {
+    switch (Pred) {
+    case CmpInst::ICMP_NE:
+    case CmpInst::ICMP_ULE:
+    case CmpInst::ICMP_SLE:
+    case CmpInst::ICMP_UGE:
+    case CmpInst::ICMP_SGE:
+    // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
+    case CmpInst::FCMP_ONE:
+    case CmpInst::FCMP_OLE:
+    case CmpInst::FCMP_OGE:
+      return false;
+    default:
+      return true;
+    }
+  }
+
+  /// Given an exploded icmp instruction, return true if the comparison only
+  /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
+  /// the result of the comparison is true when the input value is signed.
+  static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
+                             bool &TrueIfSigned) {
+    switch (Pred) {
+    case ICmpInst::ICMP_SLT: // True if LHS s< 0
+      TrueIfSigned = true;
+      return RHS.isNullValue();
+    case ICmpInst::ICMP_SLE: // True if LHS s<= -1
+      TrueIfSigned = true;
+      return RHS.isAllOnesValue();
+    case ICmpInst::ICMP_SGT: // True if LHS s> -1
+      TrueIfSigned = false;
+      return RHS.isAllOnesValue();
+    case ICmpInst::ICMP_SGE: // True if LHS s>= 0
+      TrueIfSigned = false;
+      return RHS.isNullValue();
+    case ICmpInst::ICMP_UGT:
+      // True if LHS u> RHS and RHS == sign-bit-mask - 1
+      TrueIfSigned = true;
+      return RHS.isMaxSignedValue();
+    case ICmpInst::ICMP_UGE:
+      // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
+      TrueIfSigned = true;
+      return RHS.isMinSignedValue();
+    case ICmpInst::ICMP_ULT:
+      // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
+      TrueIfSigned = false;
+      return RHS.isMinSignedValue();
+    case ICmpInst::ICMP_ULE:
+      // True if LHS u<= RHS and RHS == sign-bit-mask - 1
+      TrueIfSigned = false;
+      return RHS.isMaxSignedValue();
+    default:
+      return false;
+    }
+  }
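+
+  // Worked example (editor's illustration): for an i8 value X,
+  //   isSignBitCheck(ICmpInst::ICMP_UGT, APInt(8, 127), TrueIfSigned)
+  // returns true and sets TrueIfSigned to true, since X u> 127 holds exactly
+  // when the sign bit of X is set (127 == sign-bit-mask - 1).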
+
+  /// Add one to a Constant
+  static Constant *AddOne(Constant *C) {
+    return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
+  }
+
+  /// Subtract one from a Constant
+  static Constant *SubOne(Constant *C) {
+    return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
+  }
+
+  static llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
+  getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
+                                           Constant *C);
+
+  static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
+    // a ? b : false and a ? true : b are the canonical forms of logical and/or.
+    // This includes !a ? b : false and !a ? true : b. Absorbing the not into
+    // the select by swapping operands would break recognition of this pattern
+    // in other analyses, so don't do that.
+    return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
+                                                 PatternMatch::m_Value())) ||
+           match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
+                                                PatternMatch::m_Value()));
+  }
+
+  /// Return true if the specified value is free to invert (apply ~ to).
+  /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
+  /// is true, work under the assumption that the caller intends to remove all
+  /// uses of V and only keep uses of ~V.
+  ///
+  /// See also: canFreelyInvertAllUsersOf()
+  static bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
+    // ~(~(X)) -> X.
+    if (match(V, m_Not(PatternMatch::m_Value())))
+      return true;
+
+    // Constants can be considered to be not'ed values.
+    if (match(V, PatternMatch::m_AnyIntegralConstant()))
+      return true;
+
+    // Compares can be inverted if all of their uses are being modified to use
+    // the ~V.
+    if (isa<CmpInst>(V))
+      return WillInvertAllUses;
+
+    // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
+    // `(-1 - Constant) - A` if we are willing to invert all of the uses.
+    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
+      if (BO->getOpcode() == Instruction::Add ||
+          BO->getOpcode() == Instruction::Sub)
+        if (isa<Constant>(BO->getOperand(0)) ||
+            isa<Constant>(BO->getOperand(1)))
+          return WillInvertAllUses;
+
+    // Selects with invertible operands are freely invertible
+    if (match(V,
+              m_Select(PatternMatch::m_Value(), m_Not(PatternMatch::m_Value()),
+                       m_Not(PatternMatch::m_Value()))))
+      return WillInvertAllUses;
+
+    return false;
+  }
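+
+  // Worked instance of the add/sub case above (editor's sketch): for
+  // V = add %X, 5 the inversion folds entirely into constants,
+  //   ~V == -1 - (X + 5) == (-1 - 5) - X == -6 - X
+  // so V is free to invert provided every use is rewritten to use ~V.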
+
+  /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
+  /// InstCombine's canonicalizeICmpPredicate() must be kept in sync with this
+  /// fn.
+  ///
+  /// See also: isFreeToInvert()
+  static bool canFreelyInvertAllUsersOf(Value *V, Value *IgnoredUser) {
+    // Look at every user of V.
+    for (Use &U : V->uses()) {
+      if (U.getUser() == IgnoredUser)
+        continue; // Don't consider this user.
+
+      auto *I = cast<Instruction>(U.getUser());
+      switch (I->getOpcode()) {
+      case Instruction::Select:
+        if (U.getOperandNo() != 0) // Only if the value is used as select cond.
+          return false;
+        if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
+          return false;
+        break;
+      case Instruction::Br:
+        assert(U.getOperandNo() == 0 && "Must be branching on that value.");
+        break; // Free to invert by swapping true/false values/destinations.
+      case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
+                             // it.
+        if (!match(I, m_Not(PatternMatch::m_Value())))
+          return false; // Not a 'not'.
+        break;
+      default:
+        return false; // Don't know, likely not freely invertible.
+      }
+      // So far all users were free to invert...
+    }
+    return true; // Can freely invert all users!
+  }
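+
+  // Example (editor's illustration): if V's only user is
+  //   br i1 V, label %then, label %else
+  // the inversion is free; the branch absorbs the 'not' by swapping its
+  // destinations to give br i1 V', label %else, label %then.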
+
+  /// Some binary operators require special handling to avoid poison and
+  /// undefined behavior. If a constant vector has undef elements, replace those
+  /// undefs with identity constants if possible because those are always safe
+  /// to execute. If no identity constant exists, replace undef with some other
+  /// safe constant.
+  static Constant *
+  getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
+                                bool IsRHSConstant) {
+    auto *InVTy = cast<FixedVectorType>(In->getType());
+
+    Type *EltTy = InVTy->getElementType();
+    auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
+    if (!SafeC) {
+      // TODO: Should this be available as a constant utility function? It is
+      // similar to getBinOpAbsorber().
+      if (IsRHSConstant) {
+        switch (Opcode) {
+        case Instruction::SRem: // X % 1 = 0
+        case Instruction::URem: // X %u 1 = 0
+          SafeC = ConstantInt::get(EltTy, 1);
+          break;
+        case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
+          SafeC = ConstantFP::get(EltTy, 1.0);
+          break;
+        default:
+          llvm_unreachable(
+              "Only rem opcodes have no identity constant for RHS");
+        }
+      } else {
+        switch (Opcode) {
+        case Instruction::Shl:  // 0 << X = 0
+        case Instruction::LShr: // 0 >>u X = 0
+        case Instruction::AShr: // 0 >> X = 0
+        case Instruction::SDiv: // 0 / X = 0
+        case Instruction::UDiv: // 0 /u X = 0
+        case Instruction::SRem: // 0 % X = 0
+        case Instruction::URem: // 0 %u X = 0
+        case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
+        case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
+        case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
+        case Instruction::FRem: // 0.0 % X = 0
+          SafeC = Constant::getNullValue(EltTy);
+          break;
+        default:
+          llvm_unreachable("Expected to find identity constant for opcode");
+        }
+      }
+    }
+    assert(SafeC && "Must have safe constant for binop");
+    unsigned NumElts = InVTy->getNumElements();
+    SmallVector<Constant *, 16> Out(NumElts);
+    for (unsigned i = 0; i != NumElts; ++i) {
+      Constant *C = In->getAggregateElement(i);
+      Out[i] = isa<UndefValue>(C) ? SafeC : C;
+    }
+    return ConstantVector::get(Out);
+  }
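+
+  // Worked example (editor's sketch): with Opcode == SRem,
+  // IsRHSConstant == true and In == <2 x i8> <i8 undef, i8 3>, srem has no
+  // RHS identity, so the code above picks SafeC = i8 1 (X % 1 == 0 is always
+  // safe to execute) and returns <2 x i8> <i8 1, i8 3>.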
+
+  /// Create and insert the idiom we use to indicate a block is unreachable
+  /// without having to rewrite the CFG from within InstCombine.
+  static void CreateNonTerminatorUnreachable(Instruction *InsertAt) {
+    auto &Ctx = InsertAt->getContext();
+    new StoreInst(ConstantInt::getTrue(Ctx),
+                  UndefValue::get(Type::getInt1PtrTy(Ctx)), InsertAt);
+  }
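+
+  // The inserted idiom is, in textual IR:
+  //   store i1 true, i1* undef
+  // an immediate-UB marker that later passes recognize and fold, sparing
+  // InstCombine from rewriting the CFG itself.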
+
+  void addToWorklist(Instruction *I) { Worklist.push(I); }
+
+  AssumptionCache &getAssumptionCache() const { return AC; }
+  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
+  DominatorTree &getDominatorTree() const { return DT; }
+  const DataLayout &getDataLayout() const { return DL; }
+  const SimplifyQuery &getSimplifyQuery() const { return SQ; }
+  OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
+    return ORE;
+  }
+  BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
+  ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
+  LoopInfo *getLoopInfo() const { return LI; }
+
+  // Call target specific combiners
+  Optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
+  Optional<Value *>
+  targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
+                                         KnownBits &Known,
+                                         bool &KnownBitsComputed);
+  Optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
+      IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+      APInt &UndefElts2, APInt &UndefElts3,
+      std::function<void(Instruction *, unsigned, APInt, APInt &)>
+          SimplifyAndSetOp);
+
+  /// Inserts an instruction \p New before instruction \p Old
+  ///
+  /// Also adds the new instruction to the worklist and returns \p New so that
+  /// it is suitable for use as the return from the visitation patterns.
+  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
+    assert(New && !New->getParent() &&
+           "New instruction already inserted into a basic block!");
+    BasicBlock *BB = Old.getParent();
+    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
+    Worklist.push(New);
+    return New;
+  }
+
+  /// Same as InsertNewInstBefore, but also sets the debug loc.
+  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
+    New->setDebugLoc(Old.getDebugLoc());
+    return InsertNewInstBefore(New, Old);
+  }
+
+  /// A combiner-aware RAUW-like routine.
+  ///
+  /// This method is to be used when an instruction is found to be dead,
+  /// replaceable with another preexisting expression. Here we add all uses of
+  /// I to the worklist, replace all uses of I with the new value, then return
+  /// I, so that the inst combiner will know that I was modified.
+  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
+    // If there are no uses to replace, then we return nullptr to indicate that
+    // no changes were made to the program.
+    if (I.use_empty())
+      return nullptr;
+
+    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.
+
+    // If we are replacing the instruction with itself, this must be in a
+    // segment of unreachable code, so just clobber the instruction.
+    if (&I == V)
+      V = UndefValue::get(I.getType());
+
+    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
+                      << "    with " << *V << '\n');
+
+    I.replaceAllUsesWith(V);
+    return &I;
+  }
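+
+  // Typical use in a visit method (editor's sketch): once I is known to
+  // compute the same value as some existing V, the visitor ends with
+  //   return replaceInstUsesWith(I, V);
+  // so the driver sees a non-null result and knows I was modified.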
+
+  /// Replace operand of instruction and add old operand to the worklist.
+  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
+    Worklist.addValue(I.getOperand(OpNum));
+    I.setOperand(OpNum, V);
+    return &I;
+  }
+
+  /// Replace use and add the previously used value to the worklist.
+  void replaceUse(Use &U, Value *NewValue) {
+    Worklist.addValue(U);
+    U = NewValue;
+  }
+
+  /// Combiner aware instruction erasure.
+  ///
+  /// When dealing with an instruction that has side effects or produces a void
+  /// value, we can't rely on DCE to delete the instruction. Instead, visit
+  /// methods should return the value returned by this function.
+  virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
+
+  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
+                        const Instruction *CxtI) const {
+    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
+  }
+
+  KnownBits computeKnownBits(const Value *V, unsigned Depth,
+                             const Instruction *CxtI) const {
+    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
+  }
+
+  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
+                              unsigned Depth = 0,
+                              const Instruction *CxtI = nullptr) {
+    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
+  }
+
+  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
+                         const Instruction *CxtI = nullptr) const {
+    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
+  }
+
+  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
+                              const Instruction *CxtI = nullptr) const {
+    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
+                                               const Value *RHS,
+                                               const Instruction *CxtI) const {
+    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
+                                             const Instruction *CxtI) const {
+    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
+                                               const Value *RHS,
+                                               const Instruction *CxtI) const {
+    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
+                                             const Instruction *CxtI) const {
+    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
+                                               const Value *RHS,
+                                               const Instruction *CxtI) const {
+    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
+                                             const Instruction *CxtI) const {
+    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
+                                    const APInt &DemandedMask, KnownBits &Known,
+                                    unsigned Depth = 0) = 0;
+  virtual Value *
+  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
+                             unsigned Depth = 0,
+                             bool AllowMultipleUsers = false) = 0;
+};
+
+} // namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
index 8b70d29..c960d5b 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
@@ -28,6 +28,7 @@
 class ModulePass;
 class OptimizationRemarkEmitter;
 class Comdat;
+class CallBase;
 
 /// Instrumentation passes often insert conditional checks into entry blocks.
 /// Call this function before splitting the entry block to move instructions
@@ -62,20 +63,11 @@
   // gcc's gcov-io.h
   char Version[4];
 
-  // Emit a "cfg checksum" that follows the "line number checksum" of a
-  // function. This affects both .gcno and .gcda files.
-  bool UseCfgChecksum;
-
   // Add the 'noredzone' attribute to added runtime library calls.
   bool NoRedZone;
 
-  // Emit the name of the function in the .gcda files. This is redundant, as
-  // the function identifier can be used to find the name from the .gcno file.
-  bool FunctionNamesInData;
-
-  // Emit the exit block immediately after the start block, rather than after
-  // all of the function body's blocks.
-  bool ExitBlockBeforeBody;
+  // Use atomic profile counter increments.
+  bool Atomic = false;
 
   // Regexes separated by a semi-colon to filter the files to instrument.
   std::string Filter;
@@ -99,6 +91,8 @@
                                                      bool SamplePGO = false);
 FunctionPass *createPGOMemOPSizeOptLegacyPass();
 
+ModulePass *createCGProfileLegacyPass();
+
 // The pgo-specific indirect call promotion function declared below is used by
 // the pgo-driven indirect call promotion and sample profile passes. It's a
 // wrapper around llvm::promoteCall, et al. that additionally computes !prof
@@ -106,7 +100,7 @@
 // generic utilities.
 namespace pgo {
 
-// Helper function that transforms Inst (either an indirect-call instruction, or
+// Helper function that transforms CB (either an indirect-call instruction, or
 // an invoke instruction) to a conditional call to F. This is like:
 //     if (CB.CalledValue == F)
 //        F(...);
@@ -119,10 +113,9 @@
 // If \p AttachProfToDirectCall is true, a prof metadata is attached to the
 // new direct call to contain \p Count.
 // Returns the promoted direct call instruction.
-Instruction *promoteIndirectCall(Instruction *Inst, Function *F, uint64_t Count,
-                                 uint64_t TotalCount,
-                                 bool AttachProfToDirectCall,
-                                 OptimizationRemarkEmitter *ORE);
+CallBase &promoteIndirectCall(CallBase &CB, Function *F, uint64_t Count,
+                              uint64_t TotalCount, bool AttachProfToDirectCall,
+                              OptimizationRemarkEmitter *ORE);
 } // namespace pgo
 
 /// Options for the frontend instrumentation based profiling pass.
@@ -153,9 +146,8 @@
 ModulePass *createInstrOrderFilePass();
 
 // Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
-ModulePass *createDataFlowSanitizerPass(
-    const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
-    void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
+ModulePass *createDataFlowSanitizerLegacyPassPass(
+    const std::vector<std::string> &ABIListFiles = std::vector<std::string>());
 
 // Options for sanitizer coverage instrumentation.
 struct SanitizerCoverageOptions {
@@ -174,6 +166,7 @@
   bool TracePC = false;
   bool TracePCGuard = false;
   bool Inline8bitCounters = false;
+  bool InlineBoolFlag = false;
   bool PCTable = false;
   bool NoPrune = false;
   bool StackDepth = false;
@@ -181,10 +174,6 @@
   SanitizerCoverageOptions() = default;
 };
 
-// Insert SanitizerCoverage instrumentation.
-ModulePass *createSanitizerCoverageModulePass(
-    const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
-
 /// Calculate what to divide by to scale counts.
 ///
 /// Given the maximum count, calculate a divisor that will scale all the
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizer.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
index 40007a9..53ad0cb 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
@@ -39,7 +39,7 @@
     LocationMetadata SourceLoc;
     StringRef Name;
     bool IsDynInit = false;
-    bool IsBlacklisted = false;
+    bool IsExcluded = false;
 
     Entry() = default;
   };
@@ -102,6 +102,7 @@
                                 bool Recover = false,
                                 bool UseAfterScope = false);
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
 
 private:
   bool CompileKernel;
@@ -122,6 +123,7 @@
                                       bool UseGlobalGC = true,
                                       bool UseOdrIndicator = false);
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
 
 private:
   bool CompileKernel;
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
new file mode 100644
index 0000000..7da0bf8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -0,0 +1,49 @@
+//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares common infrastructure for AddressSanitizer and
+// HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
+
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+
+class InterestingMemoryOperand {
+public:
+  Use *PtrUse;
+  bool IsWrite;
+  Type *OpType;
+  uint64_t TypeSize;
+  MaybeAlign Alignment;
+  // The mask Value, if we're looking at a masked load/store.
+  Value *MaybeMask;
+
+  InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
+                           class Type *OpType, MaybeAlign Alignment,
+                           Value *MaybeMask = nullptr)
+      : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
+        MaybeMask(MaybeMask) {
+    const DataLayout &DL = I->getModule()->getDataLayout();
+    TypeSize = DL.getTypeStoreSizeInBits(OpType);
+    PtrUse = &I->getOperandUse(OperandNo);
+  }
+
+  Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+
+  Value *getPtr() { return PtrUse->get(); }
+};
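+
+// Illustrative construction (editor's sketch): for a StoreInst SI performing
+// `store i32 %v, i32* %p`, the pointer is operand 1 and the access is a
+// write:
+//   InterestingMemoryOperand Op(SI, /*OperandNo=*/1, /*IsWrite=*/true,
+//                               SI->getValueOperand()->getType(),
+//                               SI->getAlign());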
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
index 120c6a8..8d70f14 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
@@ -17,6 +17,7 @@
 /// stores, and other memory intrinsics.
 struct BoundsCheckingPass : PassInfoMixin<BoundsCheckingPass> {
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
 };
 
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/CGProfile.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/CGProfile.h
index 28fd380..4cb45fd 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/CGProfile.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/CGProfile.h
@@ -19,11 +19,6 @@
 class CGProfilePass : public PassInfoMixin<CGProfilePass> {
 public:
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
-
-private:
-  void addModuleFlags(
-      Module &M,
-      MapVector<std::pair<Function *, Function *>, uint64_t> &Counts) const;
 };
 } // end namespace llvm
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h
new file mode 100644
index 0000000..9b57b1f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h
@@ -0,0 +1,32 @@
+//===- DataFlowSanitizer.h - dynamic data flow analysis -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_DATAFLOWSANITIZER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_DATAFLOWSANITIZER_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class DataFlowSanitizerPass : public PassInfoMixin<DataFlowSanitizerPass> {
+private:
+  std::vector<std::string> ABIListFiles;
+
+public:
+  DataFlowSanitizerPass(
+      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
+      : ABIListFiles(ABIListFiles) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
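+
+// Illustrative registration with the new pass manager (editor's sketch,
+// assuming a ModulePassManager MPM and an ABI list file "dfsan_abilist.txt"):
+//   MPM.addPass(DataFlowSanitizerPass({"dfsan_abilist.txt"}));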
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
index b3971e4..2766cc5 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
@@ -26,5 +26,5 @@
   GCOVOptions GCOVOpts;
 };
 
-} // End llvm namespace
+} // namespace llvm
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h
index e34cf6c..68b4732 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/HWAddressSanitizer.h
@@ -26,7 +26,8 @@
 public:
   explicit HWAddressSanitizerPass(bool CompileKernel = false,
                                   bool Recover = false);
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+  static bool isRequired() { return true; }
 
 private:
   bool CompileKernel;
@@ -36,6 +37,24 @@
 FunctionPass *createHWAddressSanitizerLegacyPassPass(bool CompileKernel = false,
                                                      bool Recover = false);
 
+namespace HWASanAccessInfo {
+
+// Bit field positions for the accessinfo parameter to
+// llvm.hwasan.check.memaccess. Shared between the pass and the backend. Bits
+// 0-15 are also used by the runtime.
+enum {
+  AccessSizeShift = 0, // 4 bits
+  IsWriteShift = 4,
+  RecoverShift = 5,
+  MatchAllShift = 16, // 8 bits
+  HasMatchAllShift = 24,
+  CompileKernelShift = 25,
+};
+
+enum { RuntimeMask = 0xffff };
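+
+// Illustrative encoding (editor's sketch): a recoverable 8-byte write packs
+// as
+//   (3 << AccessSizeShift) | (1 << IsWriteShift) | (1 << RecoverShift)
+// where 3 == log2(8) fills the 4-bit access-size field.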
+
+} // namespace HWASanAccessInfo
+
 } // namespace llvm
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
index 8f76d4a..5ce72cd 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
@@ -39,13 +39,14 @@
       : Options(Options), IsCS(IsCS) {}
 
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
-  bool run(Module &M, const TargetLibraryInfo &TLI);
+  bool run(Module &M,
+           std::function<const TargetLibraryInfo &(Function &F)> GetTLI);
 
 private:
   InstrProfOptions Options;
   Module *M;
   Triple TT;
-  const TargetLibraryInfo *TLI;
+  std::function<const TargetLibraryInfo &(Function &F)> GetTLI;
   struct PerFunctionProfileData {
     uint32_t NumValueSites[IPVK_Last + 1];
     GlobalVariable *RegionCounters = nullptr;
@@ -67,11 +68,6 @@
   // vector of counter load/store pairs to be register promoted.
   std::vector<LoadStorePair> PromotionCandidates;
 
-  // The start value of precise value profile range for memory intrinsic sizes.
-  int64_t MemOPSizeRangeStart;
-  // The end value of precise value profile range for memory intrinsic sizes.
-  int64_t MemOPSizeRangeLast;
-
   int64_t TotalCountersPromoted = 0;
 
   /// Lower instrumentation intrinsics in the function. Returns true if there
@@ -81,6 +77,9 @@
   /// Register-promote counter loads and stores in loops.
   void promoteCounterLoadStores(Function *F);
 
+  /// Returns true if relocating counters at runtime is enabled.
+  bool isRuntimeCounterRelocationEnabled() const;
+
   /// Returns true if profile counter update register promotion is enabled.
   bool isCounterPromotionEnabled() const;
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemProfiler.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemProfiler.h
new file mode 100644
index 0000000..ac6a07d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemProfiler.h
@@ -0,0 +1,51 @@
+//===--------- Definition of the MemProfiler class --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MemProfiler class.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_MEMPROFILER_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_MEMPROFILER_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Public interface to the memory profiler pass for instrumenting code to
+/// profile memory accesses.
+///
+/// The profiler itself is a function pass that works by inserting various
+/// calls to the MemProfiler runtime library functions. The runtime library
+/// essentially replaces malloc() and free() with custom implementations that
+/// record data about the allocations.
+class MemProfilerPass : public PassInfoMixin<MemProfilerPass> {
+public:
+  explicit MemProfilerPass();
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+
+/// Public interface to the memory profiler module pass for instrumenting code
+/// to profile memory allocations and accesses.
+class ModuleMemProfilerPass : public PassInfoMixin<ModuleMemProfilerPass> {
+public:
+  explicit ModuleMemProfilerPass();
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+
+// Insert MemProfiler instrumentation
+FunctionPass *createMemProfilerFunctionPass();
+ModulePass *createModuleMemProfilerLegacyPassPass();
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemorySanitizer.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
index 0739d9e..f5f9ec7 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/MemorySanitizer.h
@@ -19,12 +19,11 @@
 namespace llvm {
 
 struct MemorySanitizerOptions {
-  MemorySanitizerOptions() = default;
-  MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel)
-      : TrackOrigins(TrackOrigins), Recover(Recover), Kernel(Kernel) {}
-  int TrackOrigins = 0;
-  bool Recover = false;
-  bool Kernel = false;
+  MemorySanitizerOptions() : MemorySanitizerOptions(0, false, false) {}
+  MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel);
+  bool Kernel;
+  int TrackOrigins;
+  bool Recover;
 };
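+
+// Illustrative construction (editor's sketch): origin tracking at level 2
+// with recovery enabled, targeting userspace:
+//   MemorySanitizerPass MSan(
+//       MemorySanitizerOptions(/*TrackOrigins=*/2, /*Recover=*/true,
+//                              /*Kernel=*/false));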
 
 // Insert MemorySanitizer instrumentation (detection of uninitialized reads)
@@ -41,6 +40,8 @@
   MemorySanitizerPass(MemorySanitizerOptions Options) : Options(Options) {}
 
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
 
 private:
   MemorySanitizerOptions Options;
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h
new file mode 100644
index 0000000..e3d268c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/SanitizerCoverage.h
@@ -0,0 +1,67 @@
+//===--------- Definition of the SanitizerCoverage class --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SanitizerCoverage class which is a port of the legacy
+// SanitizerCoverage pass to use the new PassManager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Transforms/Instrumentation.h"
+
+namespace llvm {
+
+/// This is the ModuleSanitizerCoverage pass used in the new pass manager. The
+/// pass instruments functions for coverage, adds initialization calls to the
+/// module for trace PC guards and 8-bit counters if they are requested, and
+/// appends globals to llvm.compiler.used.
+class ModuleSanitizerCoveragePass
+    : public PassInfoMixin<ModuleSanitizerCoveragePass> {
+public:
+  explicit ModuleSanitizerCoveragePass(
+      SanitizerCoverageOptions Options = SanitizerCoverageOptions(),
+      const std::vector<std::string> &AllowlistFiles =
+          std::vector<std::string>(),
+      const std::vector<std::string> &BlocklistFiles =
+          std::vector<std::string>())
+      : Options(Options) {
+    if (AllowlistFiles.size() > 0)
+      Allowlist = SpecialCaseList::createOrDie(AllowlistFiles,
+                                               *vfs::getRealFileSystem());
+    if (BlocklistFiles.size() > 0)
+      Blocklist = SpecialCaseList::createOrDie(BlocklistFiles,
+                                               *vfs::getRealFileSystem());
+  }
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
+
+private:
+  SanitizerCoverageOptions Options;
+
+  std::unique_ptr<SpecialCaseList> Allowlist;
+  std::unique_ptr<SpecialCaseList> Blocklist;
+};
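+
+// Illustrative use (editor's sketch, assuming a ModulePassManager MPM):
+//   SanitizerCoverageOptions Opts;
+//   Opts.Inline8bitCounters = true;
+//   Opts.PCTable = true;
+//   MPM.addPass(ModuleSanitizerCoveragePass(Opts));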
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createModuleSanitizerCoverageLegacyPassPass(
+    const SanitizerCoverageOptions &Options = SanitizerCoverageOptions(),
+    const std::vector<std::string> &AllowlistFiles = std::vector<std::string>(),
+    const std::vector<std::string> &BlocklistFiles =
+        std::vector<std::string>());
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
index b4e7d99..f9c5076 100644
--- a/linux-x64/clang/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h
@@ -27,6 +27,9 @@
 /// yet, the pass inserts the declarations. Otherwise the existing globals are used.
 struct ThreadSanitizerPass : public PassInfoMixin<ThreadSanitizerPass> {
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  static bool isRequired() { return true; }
 };
+
 } // namespace llvm
 #endif /* LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H */
diff --git a/linux-x64/clang/include/llvm/Transforms/ObjCARC.h b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
index 2f114c7..a89df95 100644
--- a/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
+++ b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
@@ -14,6 +14,8 @@
 #ifndef LLVM_TRANSFORMS_OBJCARC_H
 #define LLVM_TRANSFORMS_OBJCARC_H
 
+#include "llvm/IR/PassManager.h"
+
 namespace llvm {
 
 class Pass;
@@ -42,6 +44,22 @@
 //
 Pass *createObjCARCOptPass();
 
+struct ObjCARCOptPass : public PassInfoMixin<ObjCARCOptPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+struct ObjCARCContractPass : public PassInfoMixin<ObjCARCContractPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+struct ObjCARCAPElimPass : public PassInfoMixin<ObjCARCAPElimPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+struct ObjCARCExpandPass : public PassInfoMixin<ObjCARCExpandPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
 } // End llvm namespace
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar.h b/linux-x64/clang/include/llvm/Transforms/Scalar.h
index f9360b5..3217257 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar.h
@@ -14,25 +14,15 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_H
 #define LLVM_TRANSFORMS_SCALAR_H
 
+#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
 #include <functional>
 
 namespace llvm {
 
-class BasicBlockPass;
 class Function;
 class FunctionPass;
 class ModulePass;
 class Pass;
-class GetElementPtrInst;
-class PassInfo;
-class TargetLowering;
-class TargetMachine;
-
-//===----------------------------------------------------------------------===//
-//
-// ConstantPropagation - A worklist driven constant propagation pass
-//
-FunctionPass *createConstantPropagationPass();
 
 //===----------------------------------------------------------------------===//
 //
@@ -43,17 +33,22 @@
 
 //===----------------------------------------------------------------------===//
 //
+// AnnotationRemarks - Emit remarks for !annotation metadata.
+//
+FunctionPass *createAnnotationRemarksLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
 // SCCP - Sparse conditional constant propagation.
 //
 FunctionPass *createSCCPPass();
 
 //===----------------------------------------------------------------------===//
 //
-// DeadInstElimination - This pass quickly removes trivially dead instructions
-// without modifying the CFG of the function.  It is a BasicBlockPass, so it
-// runs efficiently when queued next to other BasicBlockPass's.
+// RedundantDbgInstElimination - This pass removes redundant dbg intrinsics
+// without modifying the CFG of the function.  It is a FunctionPass.
 //
-Pass *createDeadInstEliminationPass();
+Pass *createRedundantDbgInstEliminationPass();
 
 //===----------------------------------------------------------------------===//
 //
@@ -162,6 +157,12 @@
 
 //===----------------------------------------------------------------------===//
 //
+// LoopFlatten - This pass flattens nested loops into a single loop.
+//
+FunctionPass *createLoopFlattenPass();
+
+//===----------------------------------------------------------------------===//
+//
 // LoopStrengthReduce - This pass strength-reduces GEP instructions that use
 // a loop's canonical induction variable as one of their indices.
 //
@@ -244,10 +245,12 @@
 //===----------------------------------------------------------------------===//
 //
 // JumpThreading - Thread control through multi-pred/multi-succ blocks where some
-// preds always go to some succ. Thresholds other than minus one override the
-// internal BB duplication default threshold.
+// preds always go to some succ. If FreezeSelectCond is true, the condition of
+// a select is frozen when the select is unfolded into a branch. Thresholds
+// other than minus one override the internal BB duplication default threshold.
 //
-FunctionPass *createJumpThreadingPass(int Threshold = -1);
+FunctionPass *createJumpThreadingPass(bool FreezeSelectCond = false,
+                                      int Threshold = -1);
 
 //===----------------------------------------------------------------------===//
 //
@@ -255,8 +258,7 @@
 // simplify terminator instructions, convert switches to lookup tables, etc.
 //
 FunctionPass *createCFGSimplificationPass(
-    unsigned Threshold = 1, bool ForwardSwitchCond = false,
-    bool ConvertSwitch = false, bool KeepLoops = true, bool SinkCommon = false,
+    SimplifyCFGOptions Options = SimplifyCFGOptions(),
     std::function<bool(const Function &)> Ftor = nullptr);
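+//
+// For example (editor's sketch), a variant that also converts switches to
+// lookup tables can be requested through the options struct:
+//   createCFGSimplificationPass(
+//       SimplifyCFGOptions().convertSwitchToLookupTable(true));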
 
 //===----------------------------------------------------------------------===//
@@ -308,7 +310,7 @@
 // MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
 // are hoisted into the header, while stores sink into the footer.
 //
-FunctionPass *createMergedLoadStoreMotionPass();
+FunctionPass *createMergedLoadStoreMotionPass(bool SplitFooterBB = false);
 
 //===----------------------------------------------------------------------===//
 //
@@ -345,6 +347,13 @@
 
 //===----------------------------------------------------------------------===//
 //
+// ConstraintElimination - This pass eliminates conditions based on found
+//                         constraints.
+//
+FunctionPass *createConstraintEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
 // Sink - Code Sinking
 //
 FunctionPass *createSinkingPass();
@@ -363,6 +372,19 @@
 
 //===----------------------------------------------------------------------===//
 //
+// LowerMatrixIntrinsics - Lower matrix intrinsics to vector operations.
+//
+Pass *createLowerMatrixIntrinsicsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerMatrixIntrinsicsMinimal - Lower matrix intrinsics to vector operations
+//                               (lightweight, does not require extra analysis)
+//
+Pass *createLowerMatrixIntrinsicsMinimalPass();
+
+//===----------------------------------------------------------------------===//
+//
 // LowerWidenableCondition - Lower widenable condition to i1 true.
 //
 Pass *createLowerWidenableConditionPass();
@@ -397,6 +419,13 @@
 
 //===----------------------------------------------------------------------===//
 //
+// LowerConstantIntrinsics - Expand any remaining llvm.objectsize and
+// llvm.is.constant intrinsic calls, even for the unknown cases.
+//
+FunctionPass *createLowerConstantIntrinsicsPass();
+
+//===----------------------------------------------------------------------===//
+//
 // PartiallyInlineLibCalls - Tries to inline the fast path of library
 // calls such as sqrt.
 //
@@ -509,6 +538,21 @@
 // transformations.
 //
 Pass *createWarnMissedTransformationsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstSimplify - This pass does instruction simplification on each
+// instruction in a function.
+//
+FunctionPass *createInstSimplifyLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// createScalarizeMaskedMemIntrinLegacyPass - Replace masked load, store,
+// gather and scatter intrinsics with scalar code when the target doesn't
+// support them.
+//
+FunctionPass *createScalarizeMaskedMemIntrinLegacyPass();
 } // End llvm namespace
 
 #endif
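
For context, a minimal sketch of how the updated Scalar.h factory signatures above might be driven from a legacy pass pipeline. The pipeline and the helper name are illustrative, not part of this change, and it assumes SimplifyCFGOptions lives in Transforms/Utils in this snapshot:

// Sketch: exercising the updated factory signatures from Scalar.h above.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"

static void buildScalarPipeline(llvm::legacy::FunctionPassManager &FPM) {
  // FreezeSelectCond is now the first parameter; -1 keeps the internal
  // BB duplication threshold default.
  FPM.add(llvm::createJumpThreadingPass(/*FreezeSelectCond=*/true,
                                        /*Threshold=*/-1));
  // The loose boolean knobs were folded into a single SimplifyCFGOptions.
  FPM.add(llvm::createCFGSimplificationPass(
      llvm::SimplifyCFGOptions().convertSwitchToLookupTable(true)));
  // New optional parameter: also split the footer block when sinking stores.
  FPM.add(llvm::createMergedLoadStoreMotionPass(/*SplitFooterBB=*/true));
}
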
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
index fb1687e..10b6e1c 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
@@ -17,13 +17,15 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
 #define LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
 
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
 
+class AssumptionCache;
+class DominatorTree;
+class ScalarEvolution;
+class SCEV;
+
 struct AlignmentFromAssumptionsPass
     : public PassInfoMixin<AlignmentFromAssumptionsPass> {
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
@@ -35,9 +37,9 @@
   ScalarEvolution *SE = nullptr;
   DominatorTree *DT = nullptr;
 
-  bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
-                            const SCEV *&OffSCEV);
-  bool processAssumption(CallInst *I);
+  bool extractAlignmentInfo(CallInst *I, unsigned Idx, Value *&AAPtr,
+                            const SCEV *&AlignSCEV, const SCEV *&OffSCEV);
+  bool processAssumption(CallInst *I, unsigned Idx);
 };
 }
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/AnnotationRemarks.h b/linux-x64/clang/include/llvm/Transforms/Scalar/AnnotationRemarks.h
new file mode 100644
index 0000000..f161976
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/AnnotationRemarks.h
@@ -0,0 +1,26 @@
+//===- AnnotationRemarks.h - Emit remarks for !annotation MD ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file defines AnnotationRemarksPass for the new pass manager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ANNOTATION_REMARKS_H
+#define LLVM_TRANSFORMS_SCALAR_ANNOTATION_REMARKS_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct AnnotationRemarksPass : public PassInfoMixin<AnnotationRemarksPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_ANNOTATION_REMARKS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
index b605563..74cbf84 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
@@ -9,13 +9,8 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
 #define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
 
-#include "llvm/ADT/SetVector.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
-#include "llvm/Support/Compiler.h"
-#include <vector>
 
 namespace llvm {
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
index 6b0fc9c..11379e5 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
@@ -14,7 +14,7 @@
 // cost. If the constant can be folded into the instruction (the cost is
 // TCC_Free) or the cost is just a simple operation (TCC_BASIC), then we don't
 // consider it expensive and leave it alone. This is the default behavior and
-// the default implementation of getIntImmCost will always return TCC_Free.
+// the default implementation of getIntImmCostInst will always return TCC_Free.
 //
 // If the cost is more than TCC_BASIC, then the integer constant can't be folded
 // into the instruction and it might be beneficial to hoist the constant.
@@ -37,7 +37,9 @@
 #define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
 
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/PassManager.h"
@@ -154,21 +156,21 @@
 
   /// Keeps track of constant candidates found in the function.
   using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
-  using GVCandVecMapType = DenseMap<GlobalVariable *, ConstCandVecType>;
+  using GVCandVecMapType = MapVector<GlobalVariable *, ConstCandVecType>;
   ConstCandVecType ConstIntCandVec;
   GVCandVecMapType ConstGEPCandMap;
 
   /// These are the final constants we decided to hoist.
   using ConstInfoVecType = SmallVector<consthoist::ConstantInfo, 8>;
-  using GVInfoVecMapType = DenseMap<GlobalVariable *, ConstInfoVecType>;
+  using GVInfoVecMapType = MapVector<GlobalVariable *, ConstInfoVecType>;
   ConstInfoVecType ConstIntInfoVec;
   GVInfoVecMapType ConstGEPInfoMap;
 
   /// Keep track of cast instructions we already cloned.
-  SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
+  MapVector<Instruction *, Instruction *> ClonedCastMap;
 
   Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
-  SmallPtrSet<Instruction *, 8>
+  SetVector<Instruction *>
   findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
   void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                  Instruction *Inst, unsigned Idx,
@@ -196,7 +198,6 @@
   // constant GEP base.
   bool emitBaseConstants(GlobalVariable *BaseGV);
   void deleteDeadCastInst() const;
-  bool optimizeConstants(Function &Fn);
 };
 
 } // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ConstraintElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstraintElimination.h
new file mode 100644
index 0000000..544a6c2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstraintElimination.h
@@ -0,0 +1,24 @@
+//===- ConstraintElimination.h - Constraint elimination pass ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class ConstraintEliminationPass
+    : public PassInfoMixin<ConstraintEliminationPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H
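
Since this is a plain new-PM function pass, wiring it up needs nothing beyond the header; a minimal illustrative sketch (the helper name is hypothetical):

// Sketch: scheduling the new pass in a new-PM function pipeline.
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/ConstraintElimination.h"

static void addConstraintElimination(llvm::FunctionPassManager &FPM) {
  // Folds comparisons that are implied by dominating conditions.
  FPM.addPass(llvm::ConstraintEliminationPass());
}
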
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
index 974e4b2..4d83296 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
@@ -23,6 +23,12 @@
 public:
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
+
+class RedundantDbgInstEliminationPass
+    : public PassInfoMixin<RedundantDbgInstEliminationPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
 }
 
 #endif // LLVM_TRANSFORMS_SCALAR_DCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
index 06aeb83..5fb47af 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
@@ -16,7 +16,9 @@
 
 #include "llvm/ADT/EquivalenceClasses.h"
 #include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
 
@@ -26,22 +28,22 @@
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   // Glue for old PM.
-  bool runImpl(Function &F);
+  bool runImpl(Function &F, const DominatorTree &DT);
 
 private:
-  void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
+  void findRoots(Function &F, const DominatorTree &DT);
   void seen(Instruction *I, ConstantRange R);
   ConstantRange badRange();
   ConstantRange unknownRange();
   ConstantRange validateRange(ConstantRange R);
-  void walkBackwards(const SmallPtrSetImpl<Instruction *> &Roots);
+  void walkBackwards();
   void walkForwards();
   bool validateAndTransform();
   Value *convert(Instruction *I, Type *ToTy);
   void cleanup();
 
   MapVector<Instruction *, ConstantRange> SeenInsts;
-  SmallPtrSet<Instruction *, 8> Roots;
+  SmallSetVector<Instruction *, 8> Roots;
   EquivalenceClasses<Instruction *> ECs;
   MapVector<Instruction *, Value *> ConvertedInsts;
   LLVMContext *Ctx;
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
index 9fe00a9..d6b3c8c 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
@@ -20,7 +20,6 @@
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/IR/Dominators.h"
@@ -35,6 +34,7 @@
 
 namespace llvm {
 
+class AAResults;
 class AssumptionCache;
 class BasicBlock;
 class BranchInst;
@@ -46,11 +46,12 @@
 class IntrinsicInst;
 class LoadInst;
 class LoopInfo;
+class MemorySSA;
+class MemorySSAUpdater;
 class OptimizationRemarkEmitter;
 class PHINode;
 class TargetLibraryInfo;
 class Value;
-
 /// A private "module" namespace for types and utilities used by GVN. These
 /// are implementation details and should not be used by clients.
 namespace gvn LLVM_LIBRARY_VISIBILITY {
@@ -61,14 +62,64 @@
 
 } // end namespace gvn
 
+/// A set of parameters to control various transforms performed by GVN pass.
+/// Each of the optional boolean parameters can be set to:
+///      true - enabling the transformation.
+///      false - disabling the transformation.
+///      None - relying on a global default.
+/// Intended use is to create a default object, modify parameters with
+/// additional setters and then pass it to GVN.
+struct GVNOptions {
+  Optional<bool> AllowPRE = None;
+  Optional<bool> AllowLoadPRE = None;
+  Optional<bool> AllowLoadInLoopPRE = None;
+  Optional<bool> AllowLoadPRESplitBackedge = None;
+  Optional<bool> AllowMemDep = None;
+
+  GVNOptions() = default;
+
+  /// Enables or disables PRE in GVN.
+  GVNOptions &setPRE(bool PRE) {
+    AllowPRE = PRE;
+    return *this;
+  }
+
+  /// Enables or disables PRE of loads in GVN.
+  GVNOptions &setLoadPRE(bool LoadPRE) {
+    AllowLoadPRE = LoadPRE;
+    return *this;
+  }
+
+  /// Enables or disables PRE of loads in loops in GVN.
+  GVNOptions &setLoadInLoopPRE(bool LoadInLoopPRE) {
+    AllowLoadInLoopPRE = LoadInLoopPRE;
+    return *this;
+  }
+
+  /// Enables or disables splitting the loop backedge to allow PRE of loads
+  /// across it in GVN.
+  GVNOptions &setLoadPRESplitBackedge(bool LoadPRESplitBackedge) {
+    AllowLoadPRESplitBackedge = LoadPRESplitBackedge;
+    return *this;
+  }
+
+  /// Enables or disables use of MemDepAnalysis.
+  GVNOptions &setMemDep(bool MemDep) {
+    AllowMemDep = MemDep;
+    return *this;
+  }
+};
+
 /// The core GVN pass object.
 ///
 /// FIXME: We should have a good summary of the GVN algorithm implemented by
 /// this particular pass here.
 class GVN : public PassInfoMixin<GVN> {
+  GVNOptions Options;
+
 public:
   struct Expression;
 
+  GVN(GVNOptions Options = {}) : Options(Options) {}
+
   /// Run the pass over the function.
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
@@ -80,9 +131,15 @@
   }
 
   DominatorTree &getDominatorTree() const { return *DT; }
-  AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
+  AAResults *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
   MemoryDependenceResults &getMemDep() const { return *MD; }
 
+  bool isPREEnabled() const;
+  bool isLoadPREEnabled() const;
+  bool isLoadInLoopPREEnabled() const;
+  bool isLoadPRESplitBackedgeEnabled() const;
+  bool isMemDepEnabled() const;
+
   /// This class holds the mapping between values and value numbers.  It is used
   /// as an efficient mechanism to determine the expression-wise equivalence of
   /// two values.
@@ -94,7 +151,7 @@
     // value number to the index of Expression in Expressions. We use it
     // instead of a DenseMap because filling such mapping is faster than
     // filling a DenseMap and the compile time is a little better.
-    uint32_t nextExprNumber;
+    uint32_t nextExprNumber = 0;
 
     std::vector<Expression> Expressions;
     std::vector<uint32_t> ExprIdx;
@@ -107,9 +164,9 @@
         DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>;
     PhiTranslateMap PhiTranslateTable;
 
-    AliasAnalysis *AA;
-    MemoryDependenceResults *MD;
-    DominatorTree *DT;
+    AAResults *AA = nullptr;
+    MemoryDependenceResults *MD = nullptr;
+    DominatorTree *DT = nullptr;
 
     uint32_t nextValueNumber = 1;
 
@@ -120,6 +177,8 @@
     uint32_t lookupOrAddCall(CallInst *C);
     uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
                               uint32_t Num, GVN &Gvn);
+    bool areCallValsEqual(uint32_t Num, uint32_t NewNum, const BasicBlock *Pred,
+                          const BasicBlock *PhiBlock, GVN &Gvn);
     std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
     bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
 
@@ -128,6 +187,7 @@
     ValueTable(const ValueTable &Arg);
     ValueTable(ValueTable &&Arg);
     ~ValueTable();
+    ValueTable &operator=(const ValueTable &Arg);
 
     uint32_t lookupOrAdd(Value *V);
     uint32_t lookup(Value *V, bool Verify = true) const;
@@ -140,8 +200,8 @@
     void add(Value *V, uint32_t num);
     void clear();
     void erase(Value *v);
-    void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
-    AliasAnalysis *getAliasAnalysis() const { return AA; }
+    void setAliasAnalysis(AAResults *A) { AA = A; }
+    AAResults *getAliasAnalysis() const { return AA; }
     void setMemDep(MemoryDependenceResults *M) { MD = M; }
     void setDomTree(DominatorTree *D) { DT = D; }
     uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
@@ -152,13 +212,15 @@
   friend class gvn::GVNLegacyPass;
   friend struct DenseMapInfo<Expression>;
 
-  MemoryDependenceResults *MD;
-  DominatorTree *DT;
-  const TargetLibraryInfo *TLI;
-  AssumptionCache *AC;
+  MemoryDependenceResults *MD = nullptr;
+  DominatorTree *DT = nullptr;
+  const TargetLibraryInfo *TLI = nullptr;
+  AssumptionCache *AC = nullptr;
   SetVector<BasicBlock *> DeadBlocks;
-  OptimizationRemarkEmitter *ORE;
-  ImplicitControlFlowTracking *ICF;
+  OptimizationRemarkEmitter *ORE = nullptr;
+  ImplicitControlFlowTracking *ICF = nullptr;
+  LoopInfo *LI = nullptr;
+  MemorySSAUpdater *MSSAU = nullptr;
 
   ValueTable VN;
 
@@ -175,7 +237,7 @@
   // Block-local map of equivalent values to their leader, does not
   // propagate to any successors. Entries added mid-block are applied
   // to the remaining instructions in the block.
-  SmallMapVector<Value *, Constant *, 4> ReplaceWithConstMap;
+  SmallMapVector<Value *, Value *, 4> ReplaceOperandsWithMap;
   SmallVector<Instruction *, 8> InstrsToErase;
 
   // Map the block to reversed postorder traversal number. It is used to
@@ -194,7 +256,7 @@
   bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                MemoryDependenceResults *RunMD, LoopInfo *LI,
-               OptimizationRemarkEmitter *ORE);
+               OptimizationRemarkEmitter *ORE, MemorySSA *MSSA = nullptr);
 
   /// Push a new Value to the LeaderTable onto the list for its value number.
   void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
@@ -276,11 +338,10 @@
                                  BasicBlock *Curr, unsigned int ValNo);
   Value *findLeader(const BasicBlock *BB, uint32_t num);
   void cleanupGlobalSets();
-  void fillImplicitControlFlowInfo(BasicBlock *BB);
   void verifyRemoved(const Instruction *I) const;
   bool splitCriticalEdges();
   BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
-  bool replaceOperandsWithConsts(Instruction *I) const;
+  bool replaceOperandsForInBlockEquality(Instruction *I) const;
   bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                          bool DominatesByEdge);
   bool processFoldableCondBr(BranchInst *BI);
@@ -290,8 +351,8 @@
 };
 
 /// Create a legacy GVN pass. This also allows parameterizing whether or not
-/// loads are eliminated by the pass.
-FunctionPass *createGVNPass(bool NoLoads = false);
+/// MemDep is enabled.
+FunctionPass *createGVNPass(bool NoMemDepAnalysis = false);
 
 /// A simple and fast domtree-based GVN pass to hoist common expressions
 /// from sibling branches.
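
The GVNOptions setters above return *this, so the struct composes as a builder, per the "create a default object, modify parameters with additional setters" note in the header. A hedged sketch of that intended use (the helper name is illustrative):

// Sketch: the builder-style GVNOptions interface declared above (new PM).
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/GVN.h"

static void addTunedGVN(llvm::FunctionPassManager &FPM) {
  llvm::GVNOptions Opts;       // all fields start as None = global defaults
  Opts.setPRE(true)            // force-enable partial redundancy elimination
      .setLoadInLoopPRE(false) // but keep PRE away from loads inside loops
      .setMemDep(true);        // keep MemoryDependenceAnalysis in use
  FPM.addPass(llvm::GVN(Opts));
}
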
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
index 3dc4515..c4a0487 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -323,7 +323,6 @@
 class LoadExpression final : public MemoryExpression {
 private:
   LoadInst *Load;
-  unsigned Alignment;
 
 public:
   LoadExpression(unsigned NumOperands, LoadInst *L,
@@ -332,9 +331,7 @@
 
   LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
                  const MemoryAccess *MemoryLeader)
-      : MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {
-    Alignment = L ? L->getAlignment() : 0;
-  }
+      : MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {}
 
   LoadExpression() = delete;
   LoadExpression(const LoadExpression &) = delete;
@@ -348,9 +345,6 @@
   LoadInst *getLoadInst() const { return Load; }
   void setLoadInst(LoadInst *L) { Load = L; }
 
-  unsigned getAlignment() const { return Alignment; }
-  void setAlignment(unsigned Align) { Alignment = Align; }
-
   bool equals(const Expression &Other) const override;
   bool exactlyEquals(const Expression &Other) const override {
     return Expression::exactlyEquals(Other) &&
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
index 3c20537..b5d544f 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
@@ -23,7 +23,11 @@
 class LPMUpdater;
 
 class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
+  /// Perform IV widening during the pass.
+  bool WidenIndVars;
+
 public:
+  IndVarSimplifyPass(bool WidenIndVars = true) : WidenIndVars(WidenIndVars) {}
   PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                         LoopStandardAnalysisResults &AR, LPMUpdater &U);
 };
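
A sketch of the new WidenIndVars constructor parameter in a loop pipeline; the helper name and the reason for disabling widening are illustrative assumptions:

// Sketch: disabling IV widening via the new WidenIndVars knob.
#include "llvm/Transforms/Scalar/IndVarSimplify.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

static llvm::LoopPassManager makeIVPipeline() {
  llvm::LoopPassManager LPM;
  // Pass false where widened induction variables are not wanted, e.g. when a
  // later pass depends on the original IV width (illustrative reason).
  LPM.addPass(llvm::IndVarSimplifyPass(/*WidenIndVars=*/false));
  return LPM;
}
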
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
index b1e7007..11fb80e 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
@@ -15,14 +15,12 @@
 #define LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
 
 #include "llvm/IR/PassManager.h"
-#include "llvm/Transforms/Scalar/LoopPassManager.h"
 
 namespace llvm {
 
 class IRCEPass : public PassInfoMixin<IRCEPass> {
 public:
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
 } // end namespace llvm
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/InferAddressSpaces.h b/linux-x64/clang/include/llvm/Transforms/Scalar/InferAddressSpaces.h
new file mode 100644
index 0000000..9a56b07
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/InferAddressSpaces.h
@@ -0,0 +1,27 @@
+//===- InferAddressSpaces.h - Infer address spaces ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H
+#define LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct InferAddressSpacesPass : PassInfoMixin<InferAddressSpacesPass> {
+  InferAddressSpacesPass();
+  InferAddressSpacesPass(unsigned AddressSpace);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+  unsigned FlatAddrSpace = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H
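
The second constructor lets a caller override the target's flat address space. A sketch, where the address-space number and helper name are assumptions for illustration; real targets supply their own numbering:

// Sketch: overriding the flat address space explicitly.
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"

static void addInferAddressSpaces(llvm::FunctionPassManager &FPM) {
  FPM.addPass(llvm::InferAddressSpacesPass(/*AddressSpace=*/0));
}
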
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/InstSimplifyPass.h b/linux-x64/clang/include/llvm/Transforms/Scalar/InstSimplifyPass.h
index 0c30b62..f36695a 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/InstSimplifyPass.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/InstSimplifyPass.h
@@ -36,10 +36,6 @@
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
-/// Create a legacy pass that does instruction simplification on each
-/// instruction in a function.
-FunctionPass *createInstSimplifyLegacyPass();
-
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_INSTSIMPLIFYPASS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
index 0464d40..951f4e4 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -19,7 +19,6 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/DomTreeUpdater.h"
@@ -29,6 +28,7 @@
 
 namespace llvm {
 
+class AAResults;
 class BasicBlock;
 class BinaryOperator;
 class BranchInst;
@@ -41,6 +41,8 @@
 class LazyValueInfo;
 class LoadInst;
 class PHINode;
+class SelectInst;
+class SwitchInst;
 class TargetLibraryInfo;
 class Value;
 
@@ -77,7 +79,7 @@
 class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
   TargetLibraryInfo *TLI;
   LazyValueInfo *LVI;
-  AliasAnalysis *AA;
+  AAResults *AA;
   DomTreeUpdater *DTU;
   std::unique_ptr<BlockFrequencyInfo> BFI;
   std::unique_ptr<BranchProbabilityInfo> BPI;
@@ -90,15 +92,17 @@
 #endif
 
   unsigned BBDupThreshold;
+  unsigned DefaultBBDupThreshold;
+  bool InsertFreezeWhenUnfoldingSelect;
 
 public:
-  JumpThreadingPass(int T = -1);
+  JumpThreadingPass(bool InsertFreezeWhenUnfoldingSelect = false, int T = -1);
 
   // Glue for old PM.
-  bool runImpl(Function &F, TargetLibraryInfo *TLI_, LazyValueInfo *LVI_,
-               AliasAnalysis *AA_, DomTreeUpdater *DTU_, bool HasProfileData_,
-               std::unique_ptr<BlockFrequencyInfo> BFI_,
-               std::unique_ptr<BranchProbabilityInfo> BPI_);
+  bool runImpl(Function &F, TargetLibraryInfo *TLI, LazyValueInfo *LVI,
+               AAResults *AA, DomTreeUpdater *DTU, bool HasProfileData,
+               std::unique_ptr<BlockFrequencyInfo> BFI,
+               std::unique_ptr<BranchProbabilityInfo> BPI);
 
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
@@ -107,51 +111,65 @@
     BPI.reset();
   }
 
-  void FindLoopHeaders(Function &F);
-  bool ProcessBlock(BasicBlock *BB);
-  bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
+  void findLoopHeaders(Function &F);
+  bool processBlock(BasicBlock *BB);
+  bool maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB);
+  void updateSSA(BasicBlock *BB, BasicBlock *NewBB,
+                 DenseMap<Instruction *, Value *> &ValueMapping);
+  DenseMap<Instruction *, Value *> cloneInstructions(BasicBlock::iterator BI,
+                                                     BasicBlock::iterator BE,
+                                                     BasicBlock *NewBB,
+                                                     BasicBlock *PredBB);
+  bool tryThreadEdge(BasicBlock *BB,
+                     const SmallVectorImpl<BasicBlock *> &PredBBs,
+                     BasicBlock *SuccBB);
+  void threadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
                   BasicBlock *SuccBB);
-  bool DuplicateCondBranchOnPHIIntoPred(
+  bool duplicateCondBranchOnPHIIntoPred(
       BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs);
 
-  bool ComputeValueKnownInPredecessorsImpl(
+  bool computeValueKnownInPredecessorsImpl(
       Value *V, BasicBlock *BB, jumpthreading::PredValueInfo &Result,
       jumpthreading::ConstantPreference Preference,
-      DenseSet<std::pair<Value *, BasicBlock *>> &RecursionSet,
-      Instruction *CxtI = nullptr);
+      DenseSet<Value *> &RecursionSet, Instruction *CxtI = nullptr);
   bool
-  ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
+  computeValueKnownInPredecessors(Value *V, BasicBlock *BB,
                                   jumpthreading::PredValueInfo &Result,
                                   jumpthreading::ConstantPreference Preference,
                                   Instruction *CxtI = nullptr) {
-    DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
-    return ComputeValueKnownInPredecessorsImpl(V, BB, Result, Preference,
+    DenseSet<Value *> RecursionSet;
+    return computeValueKnownInPredecessorsImpl(V, BB, Result, Preference,
                                                RecursionSet, CxtI);
   }
 
-  bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
+  Constant *evaluateOnPredecessorEdge(BasicBlock *BB, BasicBlock *PredPredBB,
+                                      Value *cond);
+  bool maybethreadThroughTwoBasicBlocks(BasicBlock *BB, Value *Cond);
+  void threadThroughTwoBasicBlocks(BasicBlock *PredPredBB, BasicBlock *PredBB,
+                                   BasicBlock *BB, BasicBlock *SuccBB);
+  bool processThreadableEdges(Value *Cond, BasicBlock *BB,
                               jumpthreading::ConstantPreference Preference,
                               Instruction *CxtI = nullptr);
 
-  bool ProcessBranchOnPHI(PHINode *PN);
-  bool ProcessBranchOnXOR(BinaryOperator *BO);
-  bool ProcessImpliedCondition(BasicBlock *BB);
+  bool processBranchOnPHI(PHINode *PN);
+  bool processBranchOnXOR(BinaryOperator *BO);
+  bool processImpliedCondition(BasicBlock *BB);
 
-  bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
-  void UnfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, SelectInst *SI,
+  bool simplifyPartiallyRedundantLoad(LoadInst *LI);
+  void unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, SelectInst *SI,
                          PHINode *SIUse, unsigned Idx);
 
-  bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
-  bool TryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB);
-  bool TryToUnfoldSelectInCurrBB(BasicBlock *BB);
+  bool tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
+  bool tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB);
+  bool tryToUnfoldSelectInCurrBB(BasicBlock *BB);
 
-  bool ProcessGuards(BasicBlock *BB);
-  bool ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard, BranchInst *BI);
+  bool processGuards(BasicBlock *BB);
+  bool threadGuard(BasicBlock *BB, IntrinsicInst *Guard, BranchInst *BI);
 
 private:
-  BasicBlock *SplitBlockPreds(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+  BasicBlock *splitBlockPreds(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
                               const char *Suffix);
-  void UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
+  void updateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
                                     BasicBlock *NewBB, BasicBlock *SuccBB);
   /// Check if the block has profile metadata for its outgoing edges.
   bool doesBlockHaveProfileData(BasicBlock *BB);
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
index f0ea928..a8f1c53 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
@@ -34,6 +34,7 @@
 
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Transforms/Scalar/LoopPassManager.h"
 
 namespace llvm {
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopFlatten.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopFlatten.h
new file mode 100644
index 0000000..41f91f0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopFlatten.h
@@ -0,0 +1,32 @@
+//===- LoopFlatten.h - Loop Flatten -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Flatten Pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class LoopFlattenPass : public PassInfoMixin<LoopFlattenPass> {
+public:
+  LoopFlattenPass() = default;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
index d2fff8b..0c6406d 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
@@ -23,6 +23,19 @@
 class Loop;
 class LPMUpdater;
 
+/// Options to disable Loop Idiom Recognize, which can be shared with other
+/// passes.
+struct DisableLIRP {
+  /// When true, the entire pass is disabled.
+  static bool All;
+
+  /// When true, Memset is disabled.
+  static bool Memset;
+
+  /// When true, Memcpy is disabled.
+  static bool Memcpy;
+};
+
 /// Performs Loop Idiom Recognize Pass.
 class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
 public:
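
Because the DisableLIRP members are static, they act as process-wide switches rather than per-instance options. A sketch of that behavior; the helper name is illustrative, and in practice these flags are normally driven by command-line options:

// Sketch: the static kill switches declared above.
#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"

static void keepIdiomsButNoMemset() {
  llvm::DisableLIRP::Memset = true;  // no memset formation
  llvm::DisableLIRP::Memcpy = false; // memcpy formation stays enabled
  llvm::DisableLIRP::All = false;    // the pass itself still runs
}
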
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopInterchange.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopInterchange.h
new file mode 100644
index 0000000..9f50fc5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopInterchange.h
@@ -0,0 +1,24 @@
+//===- LoopInterchange.h - Loop interchange pass --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+struct LoopInterchangePass : public PassInfoMixin<LoopInterchangePass> {
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
index 61ec585..2a342fc 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -36,39 +36,163 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
 #define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
 
-#include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/PriorityWorklist.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/BasicAliasAnalysis.h"
-#include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/LoopNestAnalysis.h"
 #include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassInstrumentation.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Transforms/Utils/LCSSA.h"
 #include "llvm/Transforms/Utils/LoopSimplify.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include <memory>
 
 namespace llvm {
 
 // Forward declarations of an update tracking API used in the pass manager.
 class LPMUpdater;
 
+namespace {
+
+template <typename PassT>
+using HasRunOnLoopT = decltype(std::declval<PassT>().run(
+    std::declval<Loop &>(), std::declval<LoopAnalysisManager &>(),
+    std::declval<LoopStandardAnalysisResults &>(),
+    std::declval<LPMUpdater &>()));
+
+} // namespace
+
 // Explicit specialization and instantiation declarations for the pass manager.
 // See the comments on the definition of the specialization for details on how
 // it differs from the primary template.
 template <>
-PreservedAnalyses
-PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
-            LPMUpdater &>::run(Loop &InitialL, LoopAnalysisManager &AM,
-                               LoopStandardAnalysisResults &AnalysisResults,
-                               LPMUpdater &U);
-extern template class PassManager<Loop, LoopAnalysisManager,
-                                  LoopStandardAnalysisResults &, LPMUpdater &>;
+class PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+                  LPMUpdater &>
+    : public PassInfoMixin<
+          PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+                      LPMUpdater &>> {
+public:
+  /// Construct a pass manager.
+  ///
+  /// If \p DebugLogging is true, we'll log our progress to llvm::dbgs().
+  explicit PassManager(bool DebugLogging = false)
+      : DebugLogging(DebugLogging) {}
+
+  // FIXME: These are equivalent to the default move constructor/move
+  // assignment. However, using = default triggers linker errors due to the
+  // explicit instantiations below. Find a way to use the default and remove the
+  // duplicated code here.
+  PassManager(PassManager &&Arg)
+      : IsLoopNestPass(std::move(Arg.IsLoopNestPass)),
+        LoopPasses(std::move(Arg.LoopPasses)),
+        LoopNestPasses(std::move(Arg.LoopNestPasses)),
+        DebugLogging(std::move(Arg.DebugLogging)) {}
+
+  PassManager &operator=(PassManager &&RHS) {
+    IsLoopNestPass = std::move(RHS.IsLoopNestPass);
+    LoopPasses = std::move(RHS.LoopPasses);
+    LoopNestPasses = std::move(RHS.LoopNestPasses);
+    DebugLogging = std::move(RHS.DebugLogging);
+    return *this;
+  }
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+
+  /// Add either a loop pass or a loop-nest pass to the pass manager. Append \p
+  /// Pass to the list of loop passes if it has a dedicated \fn run() method for
+  /// loops and to the list of loop-nest passes if the \fn run() method is for
+  /// loop-nests instead. Also append whether \p Pass is a loop-nest pass or not
+  /// to the end of \var IsLoopNestPass so we can easily identify the types of
+  /// passes in the pass manager later.
+  template <typename PassT>
+  std::enable_if_t<is_detected<HasRunOnLoopT, PassT>::value>
+  addPass(PassT Pass) {
+    using LoopPassModelT =
+        detail::PassModel<Loop, PassT, PreservedAnalyses, LoopAnalysisManager,
+                          LoopStandardAnalysisResults &, LPMUpdater &>;
+    IsLoopNestPass.push_back(false);
+    LoopPasses.emplace_back(new LoopPassModelT(std::move(Pass)));
+  }
+
+  template <typename PassT>
+  std::enable_if_t<!is_detected<HasRunOnLoopT, PassT>::value>
+  addPass(PassT Pass) {
+    using LoopNestPassModelT =
+        detail::PassModel<LoopNest, PassT, PreservedAnalyses,
+                          LoopAnalysisManager, LoopStandardAnalysisResults &,
+                          LPMUpdater &>;
+    IsLoopNestPass.push_back(true);
+    LoopNestPasses.emplace_back(new LoopNestPassModelT(std::move(Pass)));
+  }
+
+  // Specializations of `addPass` for `RepeatedPass`. These are necessary since
+  // `RepeatedPass` has a templated `run` method that will result in incorrect
+  // detection of `HasRunOnLoopT`.
+  template <typename PassT>
+  std::enable_if_t<is_detected<HasRunOnLoopT, PassT>::value>
+  addPass(RepeatedPass<PassT> Pass) {
+    using RepeatedLoopPassModelT =
+        detail::PassModel<Loop, RepeatedPass<PassT>, PreservedAnalyses,
+                          LoopAnalysisManager, LoopStandardAnalysisResults &,
+                          LPMUpdater &>;
+    IsLoopNestPass.push_back(false);
+    LoopPasses.emplace_back(new RepeatedLoopPassModelT(std::move(Pass)));
+  }
+
+  template <typename PassT>
+  std::enable_if_t<!is_detected<HasRunOnLoopT, PassT>::value>
+  addPass(RepeatedPass<PassT> Pass) {
+    using RepeatedLoopNestPassModelT =
+        detail::PassModel<LoopNest, RepeatedPass<PassT>, PreservedAnalyses,
+                          LoopAnalysisManager, LoopStandardAnalysisResults &,
+                          LPMUpdater &>;
+    IsLoopNestPass.push_back(true);
+    LoopNestPasses.emplace_back(
+        new RepeatedLoopNestPassModelT(std::move(Pass)));
+  }
+
+  bool isEmpty() const { return LoopPasses.empty() && LoopNestPasses.empty(); }
+
+  static bool isRequired() { return true; }
+
+  size_t getNumLoopPasses() const { return LoopPasses.size(); }
+  size_t getNumLoopNestPasses() const { return LoopNestPasses.size(); }
+
+protected:
+  using LoopPassConceptT =
+      detail::PassConcept<Loop, LoopAnalysisManager,
+                          LoopStandardAnalysisResults &, LPMUpdater &>;
+  using LoopNestPassConceptT =
+      detail::PassConcept<LoopNest, LoopAnalysisManager,
+                          LoopStandardAnalysisResults &, LPMUpdater &>;
+
+  // BitVector that identifies whether the passes are loop passes or loop-nest
+  // passes (true for loop-nest passes).
+  BitVector IsLoopNestPass;
+  std::vector<std::unique_ptr<LoopPassConceptT>> LoopPasses;
+  std::vector<std::unique_ptr<LoopNestPassConceptT>> LoopNestPasses;
+
+  /// Flag indicating whether we should do debug logging.
+  bool DebugLogging;
+
+  /// Run either a loop pass or a loop-nest pass. Returns `None` if
+  /// PassInstrumentation's BeforePass returns false. Otherwise, returns the
+  /// preserved analyses of the pass.
+  template <typename IRUnitT, typename PassT>
+  Optional<PreservedAnalyses>
+  runSinglePass(IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
+                LoopStandardAnalysisResults &AR, LPMUpdater &U,
+                PassInstrumentation &PI);
+
+  PreservedAnalyses runWithLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
+                                          LoopStandardAnalysisResults &AR,
+                                          LPMUpdater &U);
+  PreservedAnalyses runWithoutLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
+                                             LoopStandardAnalysisResults &AR,
+                                             LPMUpdater &U);
+};
 
 /// The Loop pass manager.
 ///
@@ -101,41 +225,7 @@
     RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
                         LoopStandardAnalysisResults &, LPMUpdater &>;
 
-namespace internal {
-/// Helper to implement appending of loops onto a worklist.
-///
-/// We want to process loops in postorder, but the worklist is a LIFO data
-/// structure, so we append to it in *reverse* postorder.
-///
-/// For trees, a preorder traversal is a viable reverse postorder, so we
-/// actually append using a preorder walk algorithm.
-template <typename RangeT>
-inline void appendLoopsToWorklist(RangeT &&Loops,
-                                  SmallPriorityWorklist<Loop *, 4> &Worklist) {
-  // We use an internal worklist to build up the preorder traversal without
-  // recursion.
-  SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
-
-  // We walk the initial sequence of loops in reverse because we generally want
-  // to visit defs before uses and the worklist is LIFO.
-  for (Loop *RootL : reverse(Loops)) {
-    assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
-    assert(PreOrderWorklist.empty() &&
-           "Must start with an empty preorder walk worklist.");
-    PreOrderWorklist.push_back(RootL);
-    do {
-      Loop *L = PreOrderWorklist.pop_back_val();
-      PreOrderWorklist.append(L->begin(), L->end());
-      PreOrderLoops.push_back(L);
-    } while (!PreOrderWorklist.empty());
-
-    Worklist.insert(std::move(PreOrderLoops));
-    PreOrderLoops.clear();
-  }
-}
-}
-
-template <typename LoopPassT> class FunctionToLoopPassAdaptor;
+class FunctionToLoopPassAdaptor;
 
 /// This class provides an interface for updating the loop pass manager based
 /// on mutations to the loop nest.
@@ -143,6 +233,13 @@
 /// A reference to an instance of this class is passed as an argument to each
 /// Loop pass, and Loop passes should use it to update LPM infrastructure if
 /// they modify the loop nest structure.
+///
+/// \c LPMUpdater comes with two modes: the loop mode and the loop-nest mode. In
+/// loop mode, all the loops in the function will be pushed into the worklist
+/// and when new loops are added to the pipeline, their subloops are also
+/// inserted recursively. On the other hand, in loop-nest mode, only top-level
+/// loops are contained in the worklist and the addition of new (top-level)
+/// loops will not trigger the addition of their subloops.
 class LPMUpdater {
 public:
   /// This can be queried by loop passes which run other loop passes (like pass
@@ -164,6 +261,8 @@
   /// state, this routine will mark that the current loop should be skipped by
   /// the rest of the pass management infrastructure.
   void markLoopAsDeleted(Loop &L, llvm::StringRef Name) {
+    assert((!LoopNestMode || L.isOutermost()) &&
+           "L should be a top-level loop in loop-nest mode.");
     LAM.clear(L, Name);
     assert((&L == CurrentL || CurrentL->contains(&L)) &&
            "Cannot delete a loop outside of the "
@@ -179,6 +278,8 @@
   /// loops within them will be visited in postorder as usual for the loop pass
   /// manager.
   void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
+    assert(!LoopNestMode &&
+           "Child loops should not be pushed in loop-nest mode.");
     // Insert ourselves back into the worklist first, as this loop should be
     // revisited after all the children have been processed.
     Worklist.insert(CurrentL);
@@ -190,7 +291,7 @@
                                                   "the current loop!");
 #endif
 
-    internal::appendLoopsToWorklist(NewChildLoops, Worklist);
+    appendLoopsToWorklist(NewChildLoops, Worklist);
 
     // Also skip further processing of the current loop--it will be revisited
     // after all of its newly added children are accounted for.
@@ -210,7 +311,10 @@
              "All of the new loops must be siblings of the current loop!");
 #endif
 
-    internal::appendLoopsToWorklist(NewSibLoops, Worklist);
+    if (LoopNestMode)
+      Worklist.insert(NewSibLoops);
+    else
+      appendLoopsToWorklist(NewSibLoops, Worklist);
 
     // No need to skip the current loop or revisit it, as sibling loops
     // shouldn't impact anything.
@@ -230,7 +334,7 @@
   }
 
 private:
-  template <typename LoopPassT> friend class llvm::FunctionToLoopPassAdaptor;
+  friend class llvm::FunctionToLoopPassAdaptor;
 
   /// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
   SmallPriorityWorklist<Loop *, 4> &Worklist;
@@ -240,6 +344,7 @@
 
   Loop *CurrentL;
   bool SkipCurrentLoop;
+  const bool LoopNestMode;
 
 #ifndef NDEBUG
   // In debug builds we also track the parent loop to implement asserts even in
@@ -248,10 +353,33 @@
 #endif
 
   LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
-             LoopAnalysisManager &LAM)
-      : Worklist(Worklist), LAM(LAM) {}
+             LoopAnalysisManager &LAM, bool LoopNestMode = false)
+      : Worklist(Worklist), LAM(LAM), LoopNestMode(LoopNestMode) {}
 };
 
+template <typename IRUnitT, typename PassT>
+Optional<PreservedAnalyses> LoopPassManager::runSinglePass(
+    IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
+    LoopStandardAnalysisResults &AR, LPMUpdater &U, PassInstrumentation &PI) {
+  // Check the PassInstrumentation's BeforePass callbacks before running the
+  // pass, and skip its execution completely if asked to (callback returns
+  // false).
+  if (!PI.runBeforePass<IRUnitT>(*Pass, IR))
+    return None;
+
+  PreservedAnalyses PA;
+  {
+    TimeTraceScope TimeScope(Pass->name(), IR.getName());
+    PA = Pass->run(IR, AM, AR, U);
+  }
+
+  // Do not pass a deleted Loop into the instrumentation.
+  if (U.skipCurrentLoop())
+    PI.runAfterPassInvalidated<IRUnitT>(*Pass, PA);
+  else
+    PI.runAfterPass<IRUnitT>(*Pass, IR, PA);
+  return PA;
+}
+
 /// Adaptor that maps from a function to its loops.
 ///
 /// Designed to allow composition of a LoopPass(Manager) and a
@@ -259,152 +387,107 @@
 /// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
 /// analysis prior to running the loop passes over the function to enable a \c
 /// LoopAnalysisManager to be used within this run safely.
-template <typename LoopPassT>
+///
+/// The adaptor comes with two modes: the loop mode and the loop-nest mode, and
+/// the worklist updater it carries will be in the same mode as the adaptor
+/// (refer to the documentation of \c LPMUpdater for a more detailed
+/// explanation). Specifically, in loop mode, all loops in the function will be
+/// pushed into the worklist and processed by \p Pass, while only top-level
+/// loops are processed in loop-nest mode. Please refer to the various
+/// specializations of \fn createFunctionToLoopPassAdaptor to see when loop
+/// mode and loop-nest mode are used.
 class FunctionToLoopPassAdaptor
-    : public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
+    : public PassInfoMixin<FunctionToLoopPassAdaptor> {
 public:
-  explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false)
-      : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging) {
+  using PassConceptT =
+      detail::PassConcept<Loop, LoopAnalysisManager,
+                          LoopStandardAnalysisResults &, LPMUpdater &>;
+
+  explicit FunctionToLoopPassAdaptor(std::unique_ptr<PassConceptT> Pass,
+                                     bool UseMemorySSA = false,
+                                     bool UseBlockFrequencyInfo = false,
+                                     bool DebugLogging = false,
+                                     bool LoopNestMode = false)
+      : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging),
+        UseMemorySSA(UseMemorySSA),
+        UseBlockFrequencyInfo(UseBlockFrequencyInfo),
+        LoopNestMode(LoopNestMode) {
     LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
     LoopCanonicalizationFPM.addPass(LCSSAPass());
   }
 
   /// Runs the loop passes across every loop in the function.
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
-    // Before we even compute any loop analyses, first run a miniature function
-    // pass pipeline to put loops into their canonical form. Note that we can
-    // directly build up function analyses after this as the function pass
-    // manager handles all the invalidation at that layer.
-    PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(F);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
-    PreservedAnalyses PA = PreservedAnalyses::all();
-    // Check the PassInstrumentation's BeforePass callbacks before running the
-    // canonicalization pipeline.
-    if (PI.runBeforePass<Function>(LoopCanonicalizationFPM, F)) {
-      PA = LoopCanonicalizationFPM.run(F, AM);
-      PI.runAfterPass<Function>(LoopCanonicalizationFPM, F);
-    }
+  static bool isRequired() { return true; }
 
-    // Get the loop structure for this function
-    LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
-
-    // If there are no loops, there is nothing to do here.
-    if (LI.empty())
-      return PA;
-
-    // Get the analysis results needed by loop passes.
-    MemorySSA *MSSA = EnableMSSALoopDependency
-                          ? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA())
-                          : nullptr;
-    LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
-                                       AM.getResult<AssumptionAnalysis>(F),
-                                       AM.getResult<DominatorTreeAnalysis>(F),
-                                       AM.getResult<LoopAnalysis>(F),
-                                       AM.getResult<ScalarEvolutionAnalysis>(F),
-                                       AM.getResult<TargetLibraryAnalysis>(F),
-                                       AM.getResult<TargetIRAnalysis>(F),
-                                       MSSA};
-
-    // Setup the loop analysis manager from its proxy. It is important that
-    // this is only done when there are loops to process and we have built the
-    // LoopStandardAnalysisResults object. The loop analyses cached in this
-    // manager have access to those analysis results and so it must invalidate
-    // itself when they go away.
-    LoopAnalysisManager &LAM =
-        AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
-
-    // A postorder worklist of loops to process.
-    SmallPriorityWorklist<Loop *, 4> Worklist;
-
-    // Register the worklist and loop analysis manager so that loop passes can
-    // update them when they mutate the loop nest structure.
-    LPMUpdater Updater(Worklist, LAM);
-
-    // Add the loop nests in the reverse order of LoopInfo. For some reason,
-    // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For
-    // the purpose of unrolling, loop deletion, and LICM, we largely want to
-    // work forward across the CFG so that we visit defs before uses and can
-    // propagate simplifications from one loop nest into the next.
-    // FIXME: Consider changing the order in LoopInfo.
-    internal::appendLoopsToWorklist(reverse(LI), Worklist);
-
-    do {
-      Loop *L = Worklist.pop_back_val();
-
-      // Reset the update structure for this loop.
-      Updater.CurrentL = L;
-      Updater.SkipCurrentLoop = false;
-
-#ifndef NDEBUG
-      // Save a parent loop pointer for asserts.
-      Updater.ParentL = L->getParentLoop();
-
-      // Verify the loop structure and LCSSA form before visiting the loop.
-      L->verifyLoop();
-      assert(L->isRecursivelyLCSSAForm(LAR.DT, LI) &&
-             "Loops must remain in LCSSA form!");
-#endif
-      // Check the PassInstrumentation's BeforePass callbacks before running the
-      // pass, skip its execution completely if asked to (callback returns
-      // false).
-      if (!PI.runBeforePass<Loop>(Pass, *L))
-        continue;
-      PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
-
-      // Do not pass deleted Loop into the instrumentation.
-      if (Updater.skipCurrentLoop())
-        PI.runAfterPassInvalidated<Loop>(Pass);
-      else
-        PI.runAfterPass<Loop>(Pass, *L);
-
-      // FIXME: We should verify the set of analyses relevant to Loop passes
-      // are preserved.
-
-      // If the loop hasn't been deleted, we need to handle invalidation here.
-      if (!Updater.skipCurrentLoop())
-        // We know that the loop pass couldn't have invalidated any other
-        // loop's analyses (that's the contract of a loop pass), so directly
-        // handle the loop analysis manager's invalidation here.
-        LAM.invalidate(*L, PassPA);
-
-      // Then intersect the preserved set so that invalidation of module
-      // analyses will eventually occur when the module pass completes.
-      PA.intersect(std::move(PassPA));
-    } while (!Worklist.empty());
-
-    // By definition we preserve the proxy. We also preserve all analyses on
-    // Loops. This precludes *any* invalidation of loop analyses by the proxy,
-    // but that's OK because we've taken care to invalidate analyses in the
-    // loop analysis manager incrementally above.
-    PA.preserveSet<AllAnalysesOn<Loop>>();
-    PA.preserve<LoopAnalysisManagerFunctionProxy>();
-    // We also preserve the set of standard analyses.
-    PA.preserve<DominatorTreeAnalysis>();
-    PA.preserve<LoopAnalysis>();
-    PA.preserve<ScalarEvolutionAnalysis>();
-    if (EnableMSSALoopDependency)
-      PA.preserve<MemorySSAAnalysis>();
-    // FIXME: What we really want to do here is preserve an AA category, but
-    // that concept doesn't exist yet.
-    PA.preserve<AAManager>();
-    PA.preserve<BasicAA>();
-    PA.preserve<GlobalsAA>();
-    PA.preserve<SCEVAA>();
-    return PA;
-  }
+  bool isLoopNestMode() const { return LoopNestMode; }
 
 private:
-  LoopPassT Pass;
+  std::unique_ptr<PassConceptT> Pass;
 
   FunctionPassManager LoopCanonicalizationFPM;
+
+  bool UseMemorySSA = false;
+  bool UseBlockFrequencyInfo = false;
+  const bool LoopNestMode;
 };
 
 /// A function to deduce a loop pass type and wrap it in the templated
 /// adaptor.
+///
+/// If \p Pass is a loop pass, the returned adaptor will be in loop mode.
 template <typename LoopPassT>
-FunctionToLoopPassAdaptor<LoopPassT>
-createFunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false) {
-  return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), DebugLogging);
+inline std::enable_if_t<is_detected<HasRunOnLoopT, LoopPassT>::value,
+                        FunctionToLoopPassAdaptor>
+createFunctionToLoopPassAdaptor(LoopPassT Pass, bool UseMemorySSA = false,
+                                bool UseBlockFrequencyInfo = false,
+                                bool DebugLogging = false) {
+  using PassModelT =
+      detail::PassModel<Loop, LoopPassT, PreservedAnalyses, LoopAnalysisManager,
+                        LoopStandardAnalysisResults &, LPMUpdater &>;
+  return FunctionToLoopPassAdaptor(
+      std::make_unique<PassModelT>(std::move(Pass)), UseMemorySSA,
+      UseBlockFrequencyInfo, DebugLogging, false);
+}
+
+/// If \p Pass is a loop-nest pass, \p Pass will first be wrapped into a
+/// \c LoopPassManager and the returned adaptor will be in loop-nest mode.
+template <typename LoopNestPassT>
+inline std::enable_if_t<!is_detected<HasRunOnLoopT, LoopNestPassT>::value,
+                        FunctionToLoopPassAdaptor>
+createFunctionToLoopPassAdaptor(LoopNestPassT Pass, bool UseMemorySSA = false,
+                                bool UseBlockFrequencyInfo = false,
+                                bool DebugLogging = false) {
+  LoopPassManager LPM(DebugLogging);
+  LPM.addPass(std::move(Pass));
+  using PassModelT =
+      detail::PassModel<Loop, LoopPassManager, PreservedAnalyses,
+                        LoopAnalysisManager, LoopStandardAnalysisResults &,
+                        LPMUpdater &>;
+  return FunctionToLoopPassAdaptor(std::make_unique<PassModelT>(std::move(LPM)),
+                                   UseMemorySSA, UseBlockFrequencyInfo,
+                                   DebugLogging, true);
+}
+
+/// If \p Pass is an instance of \c LoopPassManager, the returned adaptor will
+/// be in loop-nest mode if the pass manager contains only loop-nest passes.
+template <>
+inline FunctionToLoopPassAdaptor
+createFunctionToLoopPassAdaptor<LoopPassManager>(LoopPassManager LPM,
+                                                 bool UseMemorySSA,
+                                                 bool UseBlockFrequencyInfo,
+                                                 bool DebugLogging) {
+  // Check whether LPM contains any loop pass; if it does not, return an
+  // adaptor in loop-nest mode.
+  using PassModelT =
+      detail::PassModel<Loop, LoopPassManager, PreservedAnalyses,
+                        LoopAnalysisManager, LoopStandardAnalysisResults &,
+                        LPMUpdater &>;
+  bool LoopNestMode = (LPM.getNumLoopPasses() == 0);
+  return FunctionToLoopPassAdaptor(std::make_unique<PassModelT>(std::move(LPM)),
+                                   UseMemorySSA, UseBlockFrequencyInfo,
+                                   DebugLogging, LoopNestMode);
 }
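A minimal usage sketch for the adaptor overloads above, assuming the new-PM
loop passes in this tree (LICMPass stands in for any pass with a
run(Loop &, ...) method; everything else comes from the declarations above):

  FunctionPassManager FPM;
  // LICMPass defines run(Loop &, ...), so overload resolution picks the
  // loop-mode adaptor.
  FPM.addPass(createFunctionToLoopPassAdaptor(LICMPass(),
                                              /*UseMemorySSA=*/true));
  // Wrapping passes in a LoopPassManager first lets the explicit
  // specialization query getNumLoopPasses() and choose loop-nest mode.
  LoopPassManager LPM(/*DebugLogging=*/false);
  LPM.addPass(LICMPass());
  FPM.addPass(createFunctionToLoopPassAdaptor(
      std::move(LPM), /*UseMemorySSA=*/true,
      /*UseBlockFrequencyInfo=*/false, /*DebugLogging=*/false));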
 
 /// Pass for printing a loop's contents as textual IR.
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopReroll.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopReroll.h
new file mode 100644
index 0000000..6ae309e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopReroll.h
@@ -0,0 +1,27 @@
+//===- LoopReroll.h - Loop rerolling pass ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class LoopRerollPass : public PassInfoMixin<LoopRerollPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
index 7920269..bd83a6a 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollAndJamPass.h
@@ -9,15 +9,10 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLANDJAMPASS_H
 #define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLANDJAMPASS_H
 
-#include "llvm/Analysis/LoopAnalysisManager.h"
-#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
-
-class Loop;
-struct LoopStandardAnalysisResults;
-class LPMUpdater;
+class Function;
 
 /// A loop unroll-and-jam transformation.
 class LoopUnrollAndJamPass : public PassInfoMixin<LoopUnrollAndJamPass> {
@@ -25,8 +20,7 @@
 
 public:
   explicit LoopUnrollAndJamPass(int OptLevel = 2) : OptLevel(OptLevel) {}
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
 } // end namespace llvm
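A minimal scheduling sketch for the new function-level interface: the pass now
goes straight into a FunctionPassManager instead of being wrapped by
createFunctionToLoopPassAdaptor as the old Loop-level interface required.

  FunctionPassManager FPM;
  // Unroll-and-jam now iterates over the loops of F internally.
  FPM.addPass(LoopUnrollAndJamPass(/*OptLevel=*/2));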
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
index a84d889..7b049bd 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -12,6 +12,7 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
 
 namespace llvm {
 
@@ -62,6 +63,8 @@
   Optional<bool> AllowPeeling;
   Optional<bool> AllowRuntime;
   Optional<bool> AllowUpperBound;
+  Optional<bool> AllowProfileBasedPeeling;
+  Optional<unsigned> FullUnrollMaxCount;
   int OptLevel;
 
   /// If false, use a cost model to determine whether unrolling of a loop is
@@ -110,6 +113,18 @@
     OptLevel = O;
     return *this;
   }
+
+  // Enables or disables loop peeling based on profile.
+  LoopUnrollOptions &setProfileBasedPeeling(int O) {
+    AllowProfileBasedPeeling = O;
+    return *this;
+  }
+
+  // Sets the max full unroll count.
+  LoopUnrollOptions &setFullUnrollMaxCount(unsigned O) {
+    FullUnrollMaxCount = O;
+    return *this;
+  }
 };
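Because every setter returns *this, the new knobs chain with the existing
ones; a small sketch, with illustrative values, assuming LoopUnrollPass
accepts the options struct as elsewhere in this tree:

  LoopUnrollOptions UnrollOpts = LoopUnrollOptions()
                                     .setOptLevel(3)
                                     .setProfileBasedPeeling(true)
                                     .setFullUnrollMaxCount(16);
  FunctionPassManager FPM;
  FPM.addPass(LoopUnrollPass(UnrollOpts));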
 
 /// Loop unroll pass that will support both full and partial unrolling.
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopVersioningLICM.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopVersioningLICM.h
new file mode 100644
index 0000000..87d6d67
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopVersioningLICM.h
@@ -0,0 +1,25 @@
+//===- LoopVersioningLICM.h - LICM Loop Versioning ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class LoopVersioningLICMPass : public PassInfoMixin<LoopVersioningLICMPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &LAR, LPMUpdater &U);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
index 40f8ca5..1d55508 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
@@ -22,6 +22,7 @@
 class LowerAtomicPass : public PassInfoMixin<LowerAtomicPass> {
 public:
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+  static bool isRequired() { return true; }
 };
 }
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h
new file mode 100644
index 0000000..a5ad4a2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerConstantIntrinsics.h
@@ -0,0 +1,41 @@
+//===- LowerConstantIntrinsics.h - Lower constant int. pass -*- C++ -*-========//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerConstantIntrinsics pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerConstantIntrinsicsPass :
+    PassInfoMixin<LowerConstantIntrinsicsPass> {
+public:
+  explicit LowerConstantIntrinsicsPass() {}
+
+  /// Run the pass over the function.
+  ///
+  /// This will lower all remaining 'objectsize' and 'is.constant'
+  /// intrinsic calls in this function, even when the argument has no known
+  /// size or is not a constant respectively. The resulting constant is
+  /// propagated and conditional branches are resolved where possible.
+  /// This complements the Instruction Simplification and
+  /// Instruction Combination passes of the optimized pass chain.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
index 4e47ff7..22b2e64 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -17,6 +17,7 @@
 
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
 
 namespace llvm {
 
@@ -31,6 +32,8 @@
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
 };
 
+extern cl::opt<uint32_t> LikelyBranchWeight;
+extern cl::opt<uint32_t> UnlikelyBranchWeight;
 }
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h
new file mode 100644
index 0000000..a2a31d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerMatrixIntrinsics.h
@@ -0,0 +1,30 @@
+//===- LowerMatrixIntrinsics.h - Lower matrix intrinsics. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers matrix intrinsics down to vector operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERMATRIXINTRINSICSPASS_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERMATRIXINTRINSICSPASS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class LowerMatrixIntrinsicsPass
+    : public PassInfoMixin<LowerMatrixIntrinsicsPass> {
+  bool Minimal;
+
+public:
+  LowerMatrixIntrinsicsPass(bool Minimal = false) : Minimal(Minimal) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  static bool isRequired() { return true; }
+};
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
index 5386f58..635b706 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
@@ -14,23 +14,26 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
 #define LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
 
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/PassManager.h"
 #include <cstdint>
 #include <functional>
 
 namespace llvm {
 
+class AAResults;
 class AssumptionCache;
+class CallBase;
 class CallInst;
 class DominatorTree;
 class Function;
 class Instruction;
+class LoadInst;
 class MemCpyInst;
 class MemMoveInst;
 class MemoryDependenceResults;
+class MemorySSA;
+class MemorySSAUpdater;
 class MemSetInst;
 class StoreInst;
 class TargetLibraryInfo;
@@ -39,9 +42,11 @@
 class MemCpyOptPass : public PassInfoMixin<MemCpyOptPass> {
   MemoryDependenceResults *MD = nullptr;
   TargetLibraryInfo *TLI = nullptr;
-  std::function<AliasAnalysis &()> LookupAliasAnalysis;
-  std::function<AssumptionCache &()> LookupAssumptionCache;
-  std::function<DominatorTree &()> LookupDomTree;
+  AAResults *AA = nullptr;
+  AssumptionCache *AC = nullptr;
+  DominatorTree *DT = nullptr;
+  MemorySSA *MSSA = nullptr;
+  MemorySSAUpdater *MSSAU = nullptr;
 
 public:
   MemCpyOptPass() = default;
@@ -49,27 +54,28 @@
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   // Glue for the old PM.
-  bool runImpl(Function &F, MemoryDependenceResults *MD_,
-               TargetLibraryInfo *TLI_,
-               std::function<AliasAnalysis &()> LookupAliasAnalysis_,
-               std::function<AssumptionCache &()> LookupAssumptionCache_,
-               std::function<DominatorTree &()> LookupDomTree_);
+  bool runImpl(Function &F, MemoryDependenceResults *MD, TargetLibraryInfo *TLI,
+               AAResults *AA, AssumptionCache *AC, DominatorTree *DT,
+               MemorySSA *MSSA);
 
 private:
   // Helper functions
   bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
   bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
-  bool processMemCpy(MemCpyInst *M);
+  bool processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI);
   bool processMemMove(MemMoveInst *M);
-  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
-                            uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
+  bool performCallSlotOptzn(Instruction *cpyLoad, Instruction *cpyStore,
+                            Value *cpyDst, Value *cpySrc, uint64_t cpyLen,
+                            Align cpyAlign, CallInst *C);
   bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
-  bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
-  bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
-  bool processByValArgument(CallSite CS, unsigned ArgNo);
+  bool processMemSetMemCpyDependence(MemCpyInst *MemCpy, MemSetInst *MemSet);
+  bool performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, MemSetInst *MemSet);
+  bool processByValArgument(CallBase &CB, unsigned ArgNo);
   Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                     Value *ByteVal);
+  bool moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI);
 
+  void eraseInstruction(Instruction *I);
   bool iterateOnFunction(Function &F);
 };
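A hedged sketch of the legacy-PM glue implied by the new runImpl signature;
the helper name is hypothetical, and the analysis pointers are assumed to come
from the usual wrapper passes (MSSA may be null when the MemorySSA-based path
is not in use):

  bool runOnFunctionGlue(MemCpyOptPass &Impl, Function &F,
                         MemoryDependenceResults *MD, TargetLibraryInfo *TLI,
                         AAResults *AA, AssumptionCache *AC, DominatorTree *DT,
                         MemorySSA *MSSA) {
    // All analyses are handed over eagerly now; the old lazy
    // std::function lookups are gone.
    return Impl.runImpl(F, MD, TLI, AA, AC, DT, MSSA);
  }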
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
index 9071a56..c5f6d6e 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
@@ -27,12 +27,28 @@
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
-class MergedLoadStoreMotionPass
-    : public PassInfoMixin<MergedLoadStoreMotionPass> {
-public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+struct MergedLoadStoreMotionOptions {
+  bool SplitFooterBB;
+  MergedLoadStoreMotionOptions(bool SplitFooterBB = false)
+      : SplitFooterBB(SplitFooterBB) {}
+
+  MergedLoadStoreMotionOptions &splitFooterBB(bool SFBB) {
+    SplitFooterBB = SFBB;
+    return *this;
+  }
 };
 
+class MergedLoadStoreMotionPass
+    : public PassInfoMixin<MergedLoadStoreMotionPass> {
+  MergedLoadStoreMotionOptions Options;
+
+public:
+  MergedLoadStoreMotionPass()
+      : MergedLoadStoreMotionPass(MergedLoadStoreMotionOptions()) {}
+  MergedLoadStoreMotionPass(const MergedLoadStoreMotionOptions &PassOptions)
+      : Options(PassOptions) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
 }
 
 #endif // LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
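A short sketch of opting into footer-block splitting via the new options
struct; default-constructed options leave SplitFooterBB false:

  FunctionPassManager FPM;
  FPM.addPass(MergedLoadStoreMotionPass(
      MergedLoadStoreMotionOptions().splitFooterBB(true)));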
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
index 26f5fe1..5fa7427 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -114,7 +114,7 @@
   bool doOneIteration(Function &F);
 
   // Reassociates I for better CSE.
-  Instruction *tryReassociate(Instruction *I);
+  Instruction *tryReassociate(Instruction *I, const SCEV *&OrigSCEV);
 
   // Reassociate GEP for better CSE.
   Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
index 2db8d8c..28794d2 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
@@ -25,7 +25,6 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/SetVector.h"
-#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include <deque>
@@ -37,6 +36,7 @@
 class BinaryOperator;
 class Function;
 class Instruction;
+class IRBuilderBase;
 class Value;
 
 /// A private "module" namespace for types and utilities used by Reassociate.
@@ -114,7 +114,7 @@
   bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
                       reassociate::XorOpnd *Opnd2, APInt &ConstOpnd,
                       Value *&Res);
-  Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder,
+  Value *buildMinimalMultiplyDAG(IRBuilderBase &Builder,
                                  SmallVectorImpl<reassociate::Factor> &Factors);
   Value *OptimizeMul(BinaryOperator *I,
                      SmallVectorImpl<reassociate::ValueEntry> &Ops);
@@ -122,7 +122,9 @@
   void EraseInst(Instruction *I);
   void RecursivelyEraseDeadInsts(Instruction *I, OrderedSet &Insts);
   void OptimizeInst(Instruction *I);
-  Instruction *canonicalizeNegConstExpr(Instruction *I);
+  Instruction *canonicalizeNegFPConstantsForOp(Instruction *I, Instruction *Op,
+                                               Value *OtherOp);
+  Instruction *canonicalizeNegFPConstants(Instruction *I);
   void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT);
 };
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Reg2Mem.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Reg2Mem.h
new file mode 100644
index 0000000..25f6563
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Reg2Mem.h
@@ -0,0 +1,27 @@
+//===- Reg2Mem.h - Convert registers to allocas -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the RegToMem Pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_REG2MEM_H
+#define LLVM_TRANSFORMS_SCALAR_REG2MEM_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class RegToMemPass : public PassInfoMixin<RegToMemPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_REG2MEM_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
index 0ffd983..45e674a 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
@@ -45,7 +45,8 @@
   PostDominatorTree *PDT;
 };
 
-bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI,
+bool runIPSCCP(Module &M, const DataLayout &DL,
+               std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                function_ref<AnalysisResultsForFn(Function &)> getAnalysis);
 } // end namespace llvm
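Callers now hand runIPSCCP a per-function TargetLibraryInfo getter, which lets
it honor function-level attributes; a sketch assuming a
FunctionAnalysisManager named FAM and a getAnalysis callback built by the
caller:

  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };
  bool Changed = runIPSCCP(M, M.getDataLayout(), GetTLI, getAnalysis);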
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
index 864a0cb..6ef7c6b 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
 #include <vector>
 
 namespace llvm {
@@ -77,8 +78,8 @@
 
   /// A collection of instructions to delete.
   /// We try to batch deletions to simplify code and make things a bit more
-  /// efficient.
-  SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts;
+  /// efficient. We also make sure there are no dangling pointers.
+  SmallVector<WeakVH, 8> DeadInsts;
 
   /// Post-promotion worklist.
   ///
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
new file mode 100644
index 0000000..19339ca
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h
@@ -0,0 +1,29 @@
+//===- ScalarizeMaskedMemIntrin.h - Scalarize unsupported masked mem ----===//
+//                                    intrinsics
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass replaces masked memory intrinsics - when unsupported by the target
+// - with a chain of basic blocks that deal with the elements one-by-one if the
+// appropriate mask bit is set.
+//
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_TRANSFORMS_SCALAR_SCALARIZE_MASKED_MEMINTRIN_H
+#define LLVM_TRANSFORMS_SCALAR_SCALARIZE_MASKED_MEMINTRIN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct ScalarizeMaskedMemIntrinPass
+    : public PassInfoMixin<ScalarizeMaskedMemIntrinPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h
new file mode 100644
index 0000000..5bd6ce1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h
@@ -0,0 +1,27 @@
+//===- SeparateConstOffsetFromGEP.h ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H
+#define LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class SeparateConstOffsetFromGEPPass
+    : public PassInfoMixin<SeparateConstOffsetFromGEPPass> {
+  bool LowerGEP;
+
+public:
+  SeparateConstOffsetFromGEPPass(bool LowerGEP = false) : LowerGEP(LowerGEP) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
index f9792d3..7c53938 100644
--- a/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -14,9 +14,9 @@
 #ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
 #define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
 
-#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
 
 namespace llvm {
 
@@ -34,13 +34,7 @@
   /// rather than optimal IR. That is, by default we bypass transformations that
   /// are likely to improve performance but make analysis for other passes more
   /// difficult.
-  SimplifyCFGPass()
-      : SimplifyCFGPass(SimplifyCFGOptions()
-                            .forwardSwitchCondToPhi(false)
-                            .convertSwitchToLookupTable(false)
-                            .needCanonicalLoops(true)
-                            .sinkCommonInsts(false)) {}
-
+  SimplifyCFGPass();
 
   /// Construct a pass with optional optimizations.
   SimplifyCFGPass(const SimplifyCFGOptions &PassOptions);
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h b/linux-x64/clang/include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h
new file mode 100644
index 0000000..11233cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/StraightLineStrengthReduce.h
@@ -0,0 +1,24 @@
+//===- StraightLineStrengthReduce.h - -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H
+#define LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class StraightLineStrengthReducePass
+    : public PassInfoMixin<StraightLineStrengthReducePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/StructurizeCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/StructurizeCFG.h
new file mode 100644
index 0000000..50d41ac
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/StructurizeCFG.h
@@ -0,0 +1,20 @@
+//===- StructurizeCFG.h ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H
+#define LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct StructurizeCFGPass : PassInfoMixin<StructurizeCFGPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils.h b/linux-x64/clang/include/llvm/Transforms/Utils.h
index 6e03453..9162a86 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils.h
@@ -26,6 +26,12 @@
 ModulePass *createMetaRenamerPass();
 
 //===----------------------------------------------------------------------===//
+// createUniqueInternalLinkageNamesPass - Make internal linkage symbol names
+// unique.
+//
+ModulePass *createUniqueInternalLinkageNamesPass();
+
+//===----------------------------------------------------------------------===//
 //
 // LowerInvoke - This pass removes invoke instructions, converting them to call
 // instructions.
@@ -111,7 +117,7 @@
 
 /// This function returns a new pass that downgrades the debug info in the
 /// module to line tables only.
-ModulePass *createStripNonLineTableDebugInfoPass();
+ModulePass *createStripNonLineTableDebugLegacyPass();
 
 //===----------------------------------------------------------------------===//
 //
@@ -119,6 +125,42 @@
 // number of conditional branches in the hot paths based on profiles.
 //
 FunctionPass *createControlHeightReductionLegacyPass();
-}
+
+//===----------------------------------------------------------------------===//
+//
+// InjectTLIMappingsLegacy - populates the VFABI attribute with the
+// scalar-to-vector mappings from the TargetLibraryInfo.
+//
+FunctionPass *createInjectTLIMappingsLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// UnifyLoopExits - For each loop, creates a new block N such that all exiting
+// blocks branch to N, and then N distributes control flow to all the original
+// exit blocks.
+//
+FunctionPass *createUnifyLoopExitsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// FixIrreducible - Convert each SCC with irreducible control-flow
+// into a natural loop.
+//
+FunctionPass *createFixIrreduciblePass();
+
+//===----------------------------------------------------------------------===//
+//
+// AssumeSimplify - remove redundant assumes and merge assumes in the same
+// BasicBlock when possible.
+//
+FunctionPass *createAssumeSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// CanonicalizeFreezeInLoops - Canonicalize freeze instructions in loops so they
+// don't block SCEV.
+//
+Pass *createCanonicalizeFreezeInLoopsPass();
+} // namespace llvm
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h b/linux-x64/clang/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h
new file mode 100644
index 0000000..65dbf47
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h
@@ -0,0 +1,25 @@
+//===- AMDGPUEmitPrintf.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility function to lower a printf call into a series of device
+// library calls on the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
+#define LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+Value *emitAMDGPUPrintfCall(IRBuilder<> &Builder, ArrayRef<Value *> Args);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/AssumeBundleBuilder.h b/linux-x64/clang/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
new file mode 100644
index 0000000..8e00982
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
@@ -0,0 +1,60 @@
+//===- AssumeBundleBuilder.h - utils to build assume bundles ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains tools to preserve information. They should be used before
+// performing a transformation that may move and delete instructions, as such
+// transformations may destroy or worsen information that can be derived from
+// the IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H
+#define LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H
+
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class IntrinsicInst;
+class AssumptionCache;
+class DominatorTree;
+
+/// Build a call to llvm.assume to preserve information that can be derived
+/// from the given instruction.
+/// If no information can be derived from \p I, this call returns null.
+/// The returned instruction is not inserted anywhere.
+IntrinsicInst *buildAssumeFromInst(Instruction *I);
+
+/// Calls buildAssumeFromInst and, if the resulting llvm.assume is valid,
+/// inserts it before \p I. This is usually what needs to be done to salvage
+/// the knowledge contained in the instruction \p I.
+/// The AssumptionCache must be provided if it is available, or the cache may
+/// silently become invalid.
+/// The DominatorTree can optionally be provided to enable cross-block
+/// reasoning.
+void salvageKnowledge(Instruction *I, AssumptionCache *AC = nullptr,
+                      DominatorTree *DT = nullptr);
+
+/// This pass attempts to minimize the number of assumes without losing any
+/// information.
+struct AssumeSimplifyPass : public PassInfoMixin<AssumeSimplifyPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+FunctionPass *createAssumeSimplifyPass();
+
+/// This pass will try to build an llvm.assume for every instruction in the
+/// function. Its main purpose is testing.
+struct AssumeBuilderPass : public PassInfoMixin<AssumeBuilderPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif
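A minimal sketch of the salvage-before-delete pattern described above; the
helper name is illustrative, and AC and DT are assumed to be the caller's
(possibly null) analyses:

  void eraseWithSalvage(Instruction *I, AssumptionCache *AC,
                        DominatorTree *DT) {
    // Capture derivable facts (alignment, nonnull, ...) in an llvm.assume
    // before the instruction disappears.
    salvageKnowledge(I, AC, DT);
    I->eraseFromParent();
  }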
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 4d861ff..1dda739 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -17,7 +17,9 @@
 // FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/InstrTypes.h"
@@ -72,21 +74,43 @@
 /// in it, fold them away. This handles the case when all entries to the PHI
 /// nodes in a block are guaranteed equal, such as when the block has exactly
 /// one predecessor.
-void FoldSingleEntryPHINodes(BasicBlock *BB,
+bool FoldSingleEntryPHINodes(BasicBlock *BB,
                              MemoryDependenceResults *MemDep = nullptr);
 
 /// Examine each PHI in the given block and delete it if it is dead. Also
 /// recursively delete any operands that become dead as a result. This includes
 /// tracing the def-use list from the PHI to see if it is ultimately unused or
 /// if it reaches an unused cycle. Return true if any PHIs were deleted.
-bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr,
+                    MemorySSAUpdater *MSSAU = nullptr);
 
 /// Attempts to merge a block into its predecessor, if possible. The return
 /// value indicates success or failure.
+/// By default, do not merge blocks if BB's predecessor has multiple successors.
+/// If PredecessorWithTwoSuccessors = true, the blocks can only be merged
+/// if BB's Pred has a branch to BB and to AnotherBB, and BB has a single
+/// successor Sing. In this case the branch will be updated with Sing instead of
+/// BB, and BB will still be merged into its predecessor and removed.
 bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
                                LoopInfo *LI = nullptr,
                                MemorySSAUpdater *MSSAU = nullptr,
-                               MemoryDependenceResults *MemDep = nullptr);
+                               MemoryDependenceResults *MemDep = nullptr,
+                               bool PredecessorWithTwoSuccessors = false);
+
+/// Merge block successors, if possible. Return true if at least two of the
+/// blocks were merged together.
+/// In order to merge, each block must be terminated by an unconditional
+/// branch. If L is provided, then the blocks merged into their predecessors
+/// must be in L. In addition, this utility calls on another utility:
+/// MergeBlockIntoPredecessor. Blocks are successfully merged when the call to
+/// MergeBlockIntoPredecessor returns true.
+bool MergeBlockSuccessorsIntoGivenBlocks(
+    SmallPtrSetImpl<BasicBlock *> &MergeBlocks, Loop *L = nullptr,
+    DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr);
+
+/// Try to remove redundant dbg.value instructions from the given basic block.
+/// Returns true if at least one instruction was removed.
+bool RemoveRedundantDbgInstrs(BasicBlock *BB);
 
 /// Replace all uses of an instruction (specified by BI) with a value, then
 /// remove and delete the original instruction.
@@ -117,6 +141,10 @@
   bool KeepOneInputPHIs = false;
   bool PreserveLCSSA = false;
   bool IgnoreUnreachableDests = false;
+  /// SplitCriticalEdge is guaranteed to preserve loop-simplify form if LI is
+  /// provided. If it cannot be preserved, no splitting will take place. If
+  /// this flag is not set, preserve loop-simplify form if possible.
+  bool PreserveLoopSimplify = true;
 
   CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
                                LoopInfo *LI = nullptr,
@@ -143,6 +171,11 @@
     IgnoreUnreachableDests = true;
     return *this;
   }
+
+  CriticalEdgeSplittingOptions &unsetPreserveLoopSimplify() {
+    PreserveLoopSimplify = false;
+    return *this;
+  }
 };
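The option setters chain; a sketch of requesting a split without the
loop-simplify guarantee, using the SplitCriticalEdge entry point declared just
below (DT, LI, and BB are assumed to be available in the caller):

  CriticalEdgeSplittingOptions Opts(DT, LI);
  // Allow the split even when loop-simplify form cannot be kept.
  Opts.unsetPreserveLoopSimplify();
  SplitCriticalEdge(BB->getTerminator(), /*SuccNum=*/0, Opts);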
 
 /// If this edge is a critical edge, insert a new node to split the critical
@@ -163,7 +196,8 @@
 /// to.
 BasicBlock *SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
                               const CriticalEdgeSplittingOptions &Options =
-                                  CriticalEdgeSplittingOptions());
+                                  CriticalEdgeSplittingOptions(),
+                              const Twine &BBName = "");
 
 inline BasicBlock *
 SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
@@ -211,18 +245,71 @@
                                const CriticalEdgeSplittingOptions &Options =
                                    CriticalEdgeSplittingOptions());
 
-/// Split the edge connecting specified block.
+/// Split the edge connecting the specified blocks, and return the newly created
+/// basic block between \p From and \p To.
 BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
                       DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
-                      MemorySSAUpdater *MSSAU = nullptr);
+                      MemorySSAUpdater *MSSAU = nullptr,
+                      const Twine &BBName = "");
 
-/// Split the specified block at the specified instruction - everything before
-/// SplitPt stays in Old and everything starting with SplitPt moves to a new
-/// block. The two blocks are joined by an unconditional branch and the loop
-/// info is updated.
+/// Split the specified block at the specified instruction.
+///
+/// If \p Before is true, splitBlockBefore handles the block
+/// splitting. Otherwise, execution proceeds as described below.
+///
+/// Everything before \p SplitPt stays in \p Old and everything starting with \p
+/// SplitPt moves to a new block. The two blocks are joined by an unconditional
+/// branch. The new block with name \p BBName is returned.
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT,
+                       LoopInfo *LI = nullptr,
+                       MemorySSAUpdater *MSSAU = nullptr,
+                       const Twine &BBName = "", bool Before = false);
+
+/// Split the specified block at the specified instruction.
+///
+/// If \p Before is true, splitBlockBefore handles the block
+/// splitting. Otherwise, execution proceeds as described below.
+///
+/// Everything before \p SplitPt stays in \p Old and everything starting with \p
+/// SplitPt moves to a new block. The two blocks are joined by an unconditional
+/// branch. The new block with name \p BBName is returned.
 BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
-                       DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
-                       MemorySSAUpdater *MSSAU = nullptr);
+                       DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
+                       MemorySSAUpdater *MSSAU = nullptr,
+                       const Twine &BBName = "", bool Before = false);
+
+/// Split the specified block at the specified instruction \p SplitPt.
+/// All instructions before \p SplitPt are moved to a new block and all
+/// instructions after \p SplitPt stay in the old block. The new block and the
+/// old block are joined by inserting an unconditional branch to the end of the
+/// new block. The new block with name \p BBName is returned.
+BasicBlock *splitBlockBefore(BasicBlock *Old, Instruction *SplitPt,
+                             DomTreeUpdater *DTU, LoopInfo *LI,
+                             MemorySSAUpdater *MSSAU, const Twine &BBName = "");
+
+/// This method introduces at least one new basic block into the function and
+/// moves some of the predecessors of BB to be predecessors of the new block.
+/// The new predecessors are indicated by the Preds array. The new block is
+/// given a suffix of 'Suffix'. Returns the new basic block to which the
+/// predecessors from Preds now point.
+///
+/// If BB is a landingpad block, then an additional basic block might be
+/// introduced. It will have Suffix+".split_lp". See
+/// SplitLandingPadPredecessors for more details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCCSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+                                   const char *Suffix, DominatorTree *DT,
+                                   LoopInfo *LI = nullptr,
+                                   MemorySSAUpdater *MSSAU = nullptr,
+                                   bool PreserveLCSSA = false);
 
 /// This method introduces at least one new basic block into the function and
 /// moves some of the predecessors of BB to be predecessors of the new block.
@@ -240,7 +327,7 @@
 /// split is an exit of a loop with other exits).
 BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
                                    const char *Suffix,
-                                   DominatorTree *DT = nullptr,
+                                   DomTreeUpdater *DTU = nullptr,
                                    LoopInfo *LI = nullptr,
                                    MemorySSAUpdater *MSSAU = nullptr,
                                    bool PreserveLCSSA = false);
@@ -256,10 +343,31 @@
 /// no other analyses. In particular, it does not preserve LoopSimplify
 /// (because it's complicated to handle the case where one of the edges being
 /// split is an exit of a loop with other exits).
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+                                 ArrayRef<BasicBlock *> Preds,
+                                 const char *Suffix, const char *Suffix2,
+                                 SmallVectorImpl<BasicBlock *> &NewBBs,
+                                 DominatorTree *DT, LoopInfo *LI = nullptr,
+                                 MemorySSAUpdater *MSSAU = nullptr,
+                                 bool PreserveLCSSA = false);
+
+/// This method transforms the landing pad, OrigBB, by introducing two new basic
+/// blocks into the function. One of those new basic blocks gets the
+/// predecessors listed in Preds. The other basic block gets the remaining
+/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
+/// both of the new basic blocks.
+/// 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCCSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
 void SplitLandingPadPredecessors(
     BasicBlock *OrigBB, ArrayRef<BasicBlock *> Preds, const char *Suffix,
     const char *Suffix2, SmallVectorImpl<BasicBlock *> &NewBBs,
-    DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+    DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
     MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);
 
 /// This method duplicates the specified return instruction into a predecessor
@@ -291,10 +399,39 @@
 /// Returns the NewBasicBlock's terminator.
 ///
 /// Updates DT and LI if given.
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+                                       bool Unreachable, MDNode *BranchWeights,
+                                       DominatorTree *DT,
+                                       LoopInfo *LI = nullptr,
+                                       BasicBlock *ThenBlock = nullptr);
+
+/// Split the containing block at the specified instruction - everything before
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
+/// conditional branch (with value of Cmp being the condition).
+/// Before:
+///   Head
+///   SplitBefore
+///   Tail
+/// After:
+///   Head
+///   if (Cond)
+///     ThenBlock
+///   SplitBefore
+///   Tail
+///
+/// If \p ThenBlock is not specified, a new block will be created for it.
+/// If \p Unreachable is true, the newly created block will end with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT and LI if given.
 Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
                                        bool Unreachable,
                                        MDNode *BranchWeights = nullptr,
-                                       DominatorTree *DT = nullptr,
+                                       DomTreeUpdater *DTU = nullptr,
                                        LoopInfo *LI = nullptr,
                                        BasicBlock *ThenBlock = nullptr);
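A sketch of the usual guarded-slow-path pattern with the DomTreeUpdater-based
overload above; Cond, InsertPt, DTU, and SlowPathFn are assumed from the
caller's context:

  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cond, InsertPt, /*Unreachable=*/false, /*BranchWeights=*/nullptr, &DTU);
  IRBuilder<> B(ThenTerm);
  B.CreateCall(SlowPathFn); // executed only when Cond is true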
 
@@ -351,6 +488,81 @@
                                   BranchProbabilityInfo *BPI = nullptr,
                                   BlockFrequencyInfo *BFI = nullptr);
 
+/// Given a set of incoming and outgoing blocks, create a "hub" such that every
+/// edge from an incoming block InBB to an outgoing block OutBB is now split
+/// into two edges, one from InBB to the hub and another from the hub to
+/// OutBB. The hub consists of a series of guard blocks, one for each outgoing
+/// block. Each guard block conditionally branches to the corresponding outgoing
+/// block, or the next guard block in the chain. These guard blocks are returned
+/// in the argument vector.
+///
+/// Since the control flow edges from InBB to OutBB have now been replaced, the
+/// function also updates any PHINodes in OutBB. For each such PHINode, the
+/// operands corresponding to incoming blocks are moved to a new PHINode in the
+/// hub, and the hub is made an operand of the original PHINode.
+///
+/// Input CFG:
+/// ----------
+///
+///                    Def
+///                     |
+///                     v
+///           In1      In2
+///            |        |
+///            |        |
+///            v        v
+///  Foo ---> Out1     Out2
+///                     |
+///                     v
+///                    Use
+///
+///
+/// Create hub: Incoming = {In1, In2}, Outgoing = {Out1, Out2}
+/// ----------------------------------------------------------
+///
+///             Def
+///              |
+///              v
+///  In1        In2          Foo
+///   |    Hub   |            |
+///   |    + - - | - - +      |
+///   |    '     v     '      V
+///   +------> Guard1 -----> Out1
+///        '     |     '
+///        '     v     '
+///        '   Guard2 -----> Out2
+///        '           '      |
+///        + - - - - - +      |
+///                           v
+///                          Use
+///
+/// Limitations:
+/// -----------
+/// 1. This assumes that all terminators in the CFG are direct branches (the
+///    "br" instruction). The presence of any other control flow such as
+///    indirectbr, switch or callbr will cause an assert.
+///
+/// 2. The updates to the PHINodes are not sufficient to restore SSA
+///    form. Consider a definition Def, its use Use, incoming block In2 and
+///    outgoing block Out2, such that:
+///    a. In2 is reachable from Def or contains Def.
+///    b. Use is reachable from Out2 or is contained in Out2.
+///    c. Use is not a PHINode if Use is contained in Out2.
+///
+///    Clearly, Def dominates Out2 since the program is valid SSA. But when the
+///    hub is introduced, there is a new path through the hub along which Use is
+///    reachable from entry without passing through Def, and SSA is no longer
+///    valid. To fix this, we need to look at all the blocks post-dominated by
+///    the hub on the one hand, and dominated by Out2 on the other. This is left
+///    for the caller to accomplish, since each specific use of this function
+///    may have additional information which simplifies this fixup. For example,
+///    see restoreSSA() in the UnifyLoopExits pass.
+BasicBlock *CreateControlFlowHub(DomTreeUpdater *DTU,
+                                 SmallVectorImpl<BasicBlock *> &GuardBlocks,
+                                 const SetVector<BasicBlock *> &Predecessors,
+                                 const SetVector<BasicBlock *> &Successors,
+                                 const StringRef Prefix);
+
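A hedged sketch of driving the hub creation above; the incoming and outgoing
sets and the DomTreeUpdater are assumed to be computed by the caller (compare
restoreSSA() in the UnifyLoopExits pass):

  SmallVector<BasicBlock *, 8> GuardBlocks;
  SetVector<BasicBlock *> Incoming; // e.g. {In1, In2}
  SetVector<BasicBlock *> Outgoing; // e.g. {Out1, Out2}
  BasicBlock *FirstGuard = CreateControlFlowHub(&DTU, GuardBlocks, Incoming,
                                                Outgoing, "guarded");
  // Per limitation 2 above, the caller must still repair SSA for values
  // that now flow through the hub.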
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
index 8421c31..e7d4193 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -15,12 +15,11 @@
 #define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
 
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/IRBuilder.h"
 
 namespace llvm {
   class Value;
   class DataLayout;
-  class TargetLibraryInfo;
+  class IRBuilderBase;
 
   /// Analyze the name and prototype of the given function and set any
   /// applicable attributes.
@@ -30,131 +29,139 @@
   bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
   bool inferLibFuncAttributes(Module *M, StringRef Name, const TargetLibraryInfo &TLI);
 
-  /// Check whether the overloaded unary floating point function
+  /// Check whether the overloaded floating point function
   /// corresponding to \a Ty is available.
-  bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
-                       LibFunc DoubleFn, LibFunc FloatFn,
-                       LibFunc LongDoubleFn);
+  bool hasFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+                  LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn);
 
-  /// Get the name of the overloaded unary floating point function
+  /// Get the name of the overloaded floating point function
   /// corresponding to \a Ty.
-  StringRef getUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
-                            LibFunc DoubleFn, LibFunc FloatFn,
-                            LibFunc LongDoubleFn);
+  StringRef getFloatFnName(const TargetLibraryInfo *TLI, Type *Ty,
+                           LibFunc DoubleFn, LibFunc FloatFn,
+                           LibFunc LongDoubleFn);
 
   /// Return V if it is an i8*, otherwise cast it to i8*.
-  Value *castToCStr(Value *V, IRBuilder<> &B);
+  Value *castToCStr(Value *V, IRBuilderBase &B);
 
   /// Emit a call to the strlen function to the builder, for the specified
   /// pointer. Ptr is required to be some pointer type, and the return value has
   /// 'intptr_t' type.
-  Value *emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
+  Value *emitStrLen(Value *Ptr, IRBuilderBase &B, const DataLayout &DL,
                     const TargetLibraryInfo *TLI);
 
+  /// Emit a call to the strdup function to the builder, for the specified
+  /// pointer. Ptr is required to be some pointer type, and the return value has
+  /// 'i8*' type.
+  Value *emitStrDup(Value *Ptr, IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
   /// Emit a call to the strnlen function to the builder, for the specified
   /// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
   /// type, and the return value has 'intptr_t' type.
-  Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+  Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilderBase &B,
                      const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strchr function to the builder, for the specified
   /// pointer and character. Ptr is required to be some pointer type, and the
   /// return value has 'i8*' type.
-  Value *emitStrChr(Value *Ptr, char C, IRBuilder<> &B,
+  Value *emitStrChr(Value *Ptr, char C, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strncmp function to the builder.
-  Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+  Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                      const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strcpy function to the builder, for the specified
   /// pointer arguments.
-  Value *emitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+  Value *emitStrCpy(Value *Dst, Value *Src, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the stpcpy function to the builder, for the specified
   /// pointer arguments.
-  Value *emitStpCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+  Value *emitStpCpy(Value *Dst, Value *Src, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strncpy function to the builder, for the specified
   /// pointer arguments and length.
-  Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+  Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);
 
   /// Emit a call to the stpncpy function to the builder, for the specified
   /// pointer arguments and length.
-  Value *emitStpNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+  Value *emitStpNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);
 
   /// Emit a call to the __memcpy_chk function to the builder. This expects that
   /// the Len and ObjSize have type 'intptr_t' and Dst/Src are pointers.
   Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
-                       IRBuilder<> &B, const DataLayout &DL,
+                       IRBuilderBase &B, const DataLayout &DL,
                        const TargetLibraryInfo *TLI);
 
+  /// Emit a call to the mempcpy function.
+  Value *emitMemPCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
+                     const DataLayout &DL, const TargetLibraryInfo *TLI);
+
   /// Emit a call to the memchr function. This assumes that Ptr is a pointer,
   /// Val is an i32 value, and Len is an 'intptr_t' value.
-  Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
+  Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the memcmp function.
-  Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+  Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the bcmp function.
-  Value *emitBCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+  Value *emitBCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                   const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the memccpy function.
   Value *emitMemCCpy(Value *Ptr1, Value *Ptr2, Value *Val, Value *Len,
-                     IRBuilder<> &B, const TargetLibraryInfo *TLI);
+                     IRBuilderBase &B, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the snprintf function.
   Value *emitSNPrintf(Value *Dest, Value *Size, Value *Fmt,
-                      ArrayRef<Value *> Args, IRBuilder<> &B,
+                      ArrayRef<Value *> Args, IRBuilderBase &B,
                       const TargetLibraryInfo *TLI);
 
   /// Emit a call to the sprintf function.
   Value *emitSPrintf(Value *Dest, Value *Fmt, ArrayRef<Value *> VariadicArgs,
-                     IRBuilder<> &B, const TargetLibraryInfo *TLI);
+                     IRBuilderBase &B, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strcat function.
-  Value *emitStrCat(Value *Dest, Value *Src, IRBuilder<> &B,
+  Value *emitStrCat(Value *Dest, Value *Src, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strlcpy function.
-  Value *emitStrLCpy(Value *Dest, Value *Src, Value *Size, IRBuilder<> &B,
+  Value *emitStrLCpy(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strlcat function.
-  Value *emitStrLCat(Value *Dest, Value *Src, Value *Size, IRBuilder<> &B,
+  Value *emitStrLCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);
 
   /// Emit a call to the strncat function.
-  Value *emitStrNCat(Value *Dest, Value *Src, Value *Size, IRBuilder<> &B,
+  Value *emitStrNCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);
 
   /// Emit a call to the vsnprintf function.
   Value *emitVSNPrintf(Value *Dest, Value *Size, Value *Fmt, Value *VAList,
-                       IRBuilder<> &B, const TargetLibraryInfo *TLI);
+                       IRBuilderBase &B, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the vsprintf function.
-  Value *emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList, IRBuilder<> &B,
+  Value *emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList, IRBuilderBase &B,
                       const TargetLibraryInfo *TLI);
 
   /// Emit a call to the unary function named 'Name' (e.g.  'floor'). This
   /// function is known to take a single argument of type matching 'Op' and to
   /// return one value with the same type. If 'Op' is a long double, an 'l'
   /// suffix is added to the name; if 'Op' is a float, an 'f' suffix is added.
-  Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+  Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilderBase &B,
                               const AttributeList &Attrs);
 
   /// Emit a call to the unary function DoubleFn, FloatFn or LongDoubleFn,
   /// depending on the type of Op.
   Value *emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI,
                               LibFunc DoubleFn, LibFunc FloatFn,
-                              LibFunc LongDoubleFn, IRBuilder<> &B,
+                              LibFunc LongDoubleFn, IRBuilderBase &B,
                               const AttributeList &Attrs);
 
   /// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
@@ -162,67 +169,44 @@
   /// value with the same type. If 'Op1/Op2' are long double, an 'l' suffix is
   /// added to the name; if 'Op1/Op2' are float, an 'f' suffix is added.
   Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
-                               IRBuilder<> &B, const AttributeList &Attrs);
+                               IRBuilderBase &B, const AttributeList &Attrs);
+
+  /// Emit a call to the binary function DoubleFn, FloatFn or LongDoubleFn,
+  /// depending on the type of Op1.
+  Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2,
+                               const TargetLibraryInfo *TLI, LibFunc DoubleFn,
+                               LibFunc FloatFn, LibFunc LongDoubleFn,
+                               IRBuilderBase &B, const AttributeList &Attrs);
 
   /// Emit a call to the putchar function. This assumes that Char is an integer.
-  Value *emitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+  Value *emitPutChar(Value *Char, IRBuilderBase &B,
+                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the puts function. This assumes that Str is some pointer.
-  Value *emitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+  Value *emitPutS(Value *Str, IRBuilderBase &B, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the fputc function. This assumes that Char is an i32, and
   /// File is a pointer to FILE.
-  Value *emitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+  Value *emitFPutC(Value *Char, Value *File, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);
 
-  /// Emit a call to the fputc_unlocked function. This assumes that Char is an
-  /// i32, and File is a pointer to FILE.
-  Value *emitFPutCUnlocked(Value *Char, Value *File, IRBuilder<> &B,
-                           const TargetLibraryInfo *TLI);
-
   /// Emit a call to the fputs function. Str is required to be a pointer and
   /// File is a pointer to FILE.
-  Value *emitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+  Value *emitFPutS(Value *Str, Value *File, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);
 
-  /// Emit a call to the fputs_unlocked function. Str is required to be a
-  /// pointer and File is a pointer to FILE.
-  Value *emitFPutSUnlocked(Value *Str, Value *File, IRBuilder<> &B,
-                           const TargetLibraryInfo *TLI);
-
   /// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
   /// Size is an 'intptr_t', and File is a pointer to FILE.
-  Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+  Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);
 
   /// Emit a call to the malloc function.
-  Value *emitMalloc(Value *Num, IRBuilder<> &B, const DataLayout &DL,
+  Value *emitMalloc(Value *Num, IRBuilderBase &B, const DataLayout &DL,
                     const TargetLibraryInfo *TLI);
 
   /// Emit a call to the calloc function.
   Value *emitCalloc(Value *Num, Value *Size, const AttributeList &Attrs,
-                    IRBuilder<> &B, const TargetLibraryInfo &TLI);
-
-  /// Emit a call to the fwrite_unlocked function. This assumes that Ptr is a
-  /// pointer, Size is an 'intptr_t', N is nmemb and File is a pointer to FILE.
-  Value *emitFWriteUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
-                            IRBuilder<> &B, const DataLayout &DL,
-                            const TargetLibraryInfo *TLI);
-
-  /// Emit a call to the fgetc_unlocked function. File is a pointer to FILE.
-  Value *emitFGetCUnlocked(Value *File, IRBuilder<> &B,
-                           const TargetLibraryInfo *TLI);
-
-  /// Emit a call to the fgets_unlocked function. Str is required to be a
-  /// pointer, Size is an i32 and File is a pointer to FILE.
-  Value *emitFGetSUnlocked(Value *Str, Value *Size, Value *File, IRBuilder<> &B,
-                           const TargetLibraryInfo *TLI);
-
-  /// Emit a call to the fread_unlocked function. This assumes that Ptr is a
-  /// pointer, Size is an 'intptr_t', N is nmemb and File is a pointer to FILE.
-  Value *emitFReadUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
-                           IRBuilder<> &B, const DataLayout &DL,
-                           const TargetLibraryInfo *TLI);
+                    IRBuilderBase &B, const TargetLibraryInfo &TLI);
 }
 
 #endif
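
Since every emit* helper above now takes IRBuilderBase&, callers can pass any
IRBuilder<> specialization without agreeing on folder/inserter template
arguments. A minimal caller sketch, using only the emitPutS declaration from
this header; the helper name and insertion point are invented for
illustration:

    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Transforms/Utils/BuildLibCalls.h"

    using namespace llvm;

    // Hypothetical helper: emit `puts(Str)` immediately before InsertPt.
    static Value *emitGreeting(Instruction *InsertPt, Value *Str,
                               const TargetLibraryInfo *TLI) {
      IRBuilder<> B(InsertPt);      // a concrete builder with default folder
      return emitPutS(Str, B, TLI); // binds to the IRBuilderBase & parameter
    }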
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
index 4710559..bd98c90 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -19,6 +19,7 @@
 
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/IR/ValueHandle.h"
 #include <cstdint>
 
 namespace llvm {
@@ -28,8 +29,10 @@
 
 struct DivRemMapKey {
   bool SignedOp;
-  Value *Dividend;
-  Value *Divisor;
+  AssertingVH<Value> Dividend;
+  AssertingVH<Value> Divisor;
+
+  DivRemMapKey() = default;
 
   DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
       : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
@@ -50,8 +53,10 @@
   }
 
   static unsigned getHashValue(const DivRemMapKey &Val) {
-    return (unsigned)(reinterpret_cast<uintptr_t>(Val.Dividend) ^
-                      reinterpret_cast<uintptr_t>(Val.Divisor)) ^
+    return (unsigned)(reinterpret_cast<uintptr_t>(
+                          static_cast<Value *>(Val.Dividend)) ^
+                      reinterpret_cast<uintptr_t>(
+                          static_cast<Value *>(Val.Divisor))) ^
            (unsigned)Val.SignedOp;
   }
 };
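
Switching the key's operands from raw Value* to AssertingVH<Value> turns a
silently dangling map key into a hard assertion. A sketch of the failure mode
this catches, with invented variable names:

    DenseMap<DivRemMapKey, Instruction *> DivCache;
    DivCache[DivRemMapKey(/*InSignedOp=*/true, Dividend, Divisor)] = DivInst;
    // Erasing Dividend while the entry above still exists now trips the
    // AssertingVH check at the point of deletion (in +Asserts builds),
    // instead of leaving a dangling pointer for getHashValue/isEqual to
    // dereference later.
    cast<Instruction>(Dividend)->eraseFromParent();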
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CallGraphUpdater.h b/linux-x64/clang/include/llvm/Transforms/Utils/CallGraphUpdater.h
new file mode 100644
index 0000000..f8211d6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CallGraphUpdater.h
@@ -0,0 +1,109 @@
+//===- CallGraphUpdater.h - A (lazy) call graph update helper ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides interfaces used to manipulate a call graph, regardless
+/// of whether it is an "old style" CallGraph or a "new style" LazyCallGraph.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+
+namespace llvm {
+
+/// Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph. This
+/// simplifies the interface and the call sites, e.g., new and old pass manager
+/// passes can share the same code.
+class CallGraphUpdater {
+  /// Containers for functions which we did replace or want to delete when
+  /// `finalize` is called. This can happen explicitly or as part of the
+  /// destructor. Dead functions in comdat sections are tracked separately
+  /// because a function with discardable linkage in a COMDAT should only
+  /// be dropped if the entire COMDAT is dropped, see git ac07703842cf.
+  ///{
+  SmallPtrSet<Function *, 16> ReplacedFunctions;
+  SmallVector<Function *, 16> DeadFunctions;
+  SmallVector<Function *, 16> DeadFunctionsInComdats;
+  ///}
+
+  /// Old PM variables
+  ///{
+  CallGraph *CG = nullptr;
+  CallGraphSCC *CGSCC = nullptr;
+  ///}
+
+  /// New PM variables
+  ///{
+  LazyCallGraph *LCG = nullptr;
+  LazyCallGraph::SCC *SCC = nullptr;
+  CGSCCAnalysisManager *AM = nullptr;
+  CGSCCUpdateResult *UR = nullptr;
+  FunctionAnalysisManager *FAM = nullptr;
+  ///}
+
+public:
+  CallGraphUpdater() {}
+  ~CallGraphUpdater() { finalize(); }
+
+  /// Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in
+  /// the old and new pass manager (PM).
+  ///{
+  void initialize(CallGraph &CG, CallGraphSCC &SCC) {
+    this->CG = &CG;
+    this->CGSCC = &SCC;
+  }
+  void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC,
+                  CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
+    this->LCG = &LCG;
+    this->SCC = &SCC;
+    this->AM = &AM;
+    this->UR = &UR;
+    FAM =
+        &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(SCC, LCG).getManager();
+  }
+  ///}
+
+  /// Finalizer that will trigger actions like function removal from the CG.
+  bool finalize();
+
+  /// Remove \p Fn from the call graph.
+  void removeFunction(Function &Fn);
+
+  /// After a CGSCC pass changes a function in ways that affect the call
+  /// graph, this method can be called to update it.
+  void reanalyzeFunction(Function &Fn);
+
+  /// If a new function was created by outlining, this method can be called
+  /// to update the call graph for the new function. Note that the old one
+  /// still needs to be re-analyzed or manually updated.
+  void registerOutlinedFunction(Function &OriginalFn, Function &NewFn);
+
+  /// Replace \p OldFn in the call graph (and SCC) with \p NewFn. The uses
+  /// outside the call graph and the function \p OldFn are not modified.
+  /// Note that \p OldFn is also removed from the call graph
+  /// (\see removeFunction).
+  void replaceFunctionWith(Function &OldFn, Function &NewFn);
+
+  /// Remove the call site \p CS from the call graph.
+  void removeCallSite(CallBase &CS);
+
+  /// Replace \p OldCS with the new call site \p NewCS.
+  /// \return True if the replacement was successful, otherwise False. In the
+  /// latter case the parent function of \p OldCS needs to be re-analyzed.
+  bool replaceCallSite(CallBase &OldCS, CallBase &NewCS);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
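
A sketch of driving CallGraphUpdater from a new-PM CGSCC pass; only the
CallGraphUpdater calls come from this header, while the pass body and the
isTriviallyDead predicate are invented:

    PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                          LazyCallGraph &CG, CGSCCUpdateResult &UR) {
      CallGraphUpdater CGUpdater;
      CGUpdater.initialize(CG, C, AM, UR);
      for (LazyCallGraph::Node &N : C)
        if (isTriviallyDead(N.getFunction())) // hypothetical predicate
          CGUpdater.removeFunction(N.getFunction());
      CGUpdater.finalize(); // the destructor would run this implicitly too
      return PreservedAnalyses::none();
    }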
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
index d9d171c..daa8898 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
@@ -14,9 +14,11 @@
 #ifndef LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
 #define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
 
-#include "llvm/IR/CallSite.h"
-
 namespace llvm {
+class CallBase;
+class CastInst;
+class Function;
+class MDNode;
 
 /// Return true if the given indirect call site can be made to call \p Callee.
 ///
@@ -25,7 +27,7 @@
 /// match exactly, they must at least be bitcast compatible. If \p FailureReason
 /// is non-null and the indirect call cannot be promoted, the failure reason
 /// will be stored in it.
-bool isLegalToPromote(CallSite CS, Function *Callee,
+bool isLegalToPromote(const CallBase &CB, Function *Callee,
                       const char **FailureReason = nullptr);
 
 /// Promote the given indirect call site to unconditionally call \p Callee.
@@ -35,8 +37,8 @@
 /// of the callee, bitcast instructions are inserted where appropriate. If \p
 /// RetBitCast is non-null, it will be used to store the return value bitcast,
 /// if created.
-Instruction *promoteCall(CallSite CS, Function *Callee,
-                         CastInst **RetBitCast = nullptr);
+CallBase &promoteCall(CallBase &CB, Function *Callee,
+                      CastInst **RetBitCast = nullptr);
 
 /// Promote the given indirect call site to conditionally call \p Callee.
 ///
@@ -45,8 +47,31 @@
 /// indirect call site is promoted, placed in the "then" block, and returned. If
 /// \p BranchWeights is non-null, it will be used to set !prof metadata on the
 /// new conditional branch.
-Instruction *promoteCallWithIfThenElse(CallSite CS, Function *Callee,
-                                       MDNode *BranchWeights = nullptr);
+CallBase &promoteCallWithIfThenElse(CallBase &CB, Function *Callee,
+                                    MDNode *BranchWeights = nullptr);
+
+/// Try to promote (devirtualize) a virtual call on an Alloca. Return true on
+/// success.
+///
+/// Look for a pattern like:
+///
+///  %o = alloca %class.Impl
+///  %1 = getelementptr %class.Impl, %class.Impl* %o, i64 0, i32 0, i32 0
+///  store i32 (...)** bitcast (i8** getelementptr inbounds
+///      ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV4Impl, i64 0, inrange i32 0, i64 2)
+///      to i32 (...)**), i32 (...)*** %1
+///  %2 = getelementptr inbounds %class.Impl, %class.Impl* %o, i64 0, i32 0
+///  %3 = bitcast %class.Interface* %2 to void (%class.Interface*)***
+///  %vtable.i = load void (%class.Interface*)**, void (%class.Interface*)*** %3
+///  %4 = load void (%class.Interface*)*, void (%class.Interface*)** %vtable.i
+///  call void %4(%class.Interface* nonnull %2)
+///
+/// @_ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] }
+///     { [3 x i8*]
+///     [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI4Impl to i8*),
+///     i8* bitcast (void (%class.Impl*)* @_ZN4Impl3RunEv to i8*)] }
+///
+bool tryPromoteCall(CallBase &CB);
 
 } // end namespace llvm
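
With CallSite gone, promotion operates directly on CallBase. A sketch of the
usual query-then-promote sequence; the caller context is assumed:

    void maybeDevirtualize(CallBase &CB, Function *DirectCallee) {
      const char *FailureReason = nullptr;
      if (!isLegalToPromote(CB, DirectCallee, &FailureReason))
        return; // FailureReason names the problem, e.g. a type mismatch
      // Mutates CB in place and returns it; bitcasts are added where needed.
      promoteCall(CB, DirectCallee);
    }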
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h b/linux-x64/clang/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h
new file mode 100644
index 0000000..3481a09
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h
@@ -0,0 +1,33 @@
+//==- CanonicalizeFreezeInLoops.h - Canonicalize freezes in loops -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file canonicalizes freeze instructions in a loop.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
+#define LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class LPMUpdater;
+
+/// A pass that canonicalizes freeze instructions in a loop.
+class CanonicalizeFreezeInLoopsPass
+    : public PassInfoMixin<CanonicalizeFreezeInLoopsPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
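
As a loop pass, it is added to a new-PM function pipeline through the loop
adaptor. A sketch, with the surrounding pipeline setup assumed:

    #include "llvm/Transforms/Scalar/LoopPassManager.h"
    #include "llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h"

    using namespace llvm;

    void addToPipeline(FunctionPassManager &FPM) {
      // Wrap the loop pass so it can run under a function pass manager.
      FPM.addPass(
          createFunctionToLoopPassAdaptor(CanonicalizeFreezeInLoopsPass()));
    }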
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
index 872ab9c..dffb780 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
@@ -19,10 +19,8 @@
 
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Twine.h"
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/InlineCost.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 #include <functional>
@@ -31,6 +29,7 @@
 
 namespace llvm {
 
+class AAResults;
 class AllocaInst;
 class BasicBlock;
 class BlockFrequencyInfo;
@@ -172,19 +171,19 @@
 /// the auxiliary results produced by it.
 class InlineFunctionInfo {
 public:
-  explicit InlineFunctionInfo(CallGraph *cg = nullptr,
-                              std::function<AssumptionCache &(Function &)>
-                                  *GetAssumptionCache = nullptr,
-                              ProfileSummaryInfo *PSI = nullptr,
-                              BlockFrequencyInfo *CallerBFI = nullptr,
-                              BlockFrequencyInfo *CalleeBFI = nullptr)
+  explicit InlineFunctionInfo(
+      CallGraph *cg = nullptr,
+      function_ref<AssumptionCache &(Function &)> GetAssumptionCache = nullptr,
+      ProfileSummaryInfo *PSI = nullptr,
+      BlockFrequencyInfo *CallerBFI = nullptr,
+      BlockFrequencyInfo *CalleeBFI = nullptr)
       : CG(cg), GetAssumptionCache(GetAssumptionCache), PSI(PSI),
         CallerBFI(CallerBFI), CalleeBFI(CalleeBFI) {}
 
   /// If non-null, InlineFunction will update the callgraph to reflect the
   /// changes it makes.
   CallGraph *CG;
-  std::function<AssumptionCache &(Function &)> *GetAssumptionCache;
+  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
   ProfileSummaryInfo *PSI;
   BlockFrequencyInfo *CallerBFI, *CalleeBFI;
 
@@ -201,7 +200,7 @@
   /// 'InlineFunction' fills this in by scanning the inlined instructions, and
   /// only if CG is null. If CG is non-null, instead the value handle
   /// `InlinedCalls` above is used.
-  SmallVector<CallSite, 8> InlinedCallSites;
+  SmallVector<CallBase *, 8> InlinedCallSites;
 
   void reset() {
     StaticAllocas.clear();
@@ -229,10 +228,7 @@
 /// and all varargs at the callsite will be passed to any calls to
 /// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
 /// are only used by ForwardVarArgsTo.
-InlineResult InlineFunction(CallBase *CB, InlineFunctionInfo &IFI,
-                            AAResults *CalleeAAR = nullptr,
-                            bool InsertLifetime = true);
-InlineResult InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                             AAResults *CalleeAAR = nullptr,
                             bool InsertLifetime = true,
                             Function *ForwardVarArgsTo = nullptr);
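
Callers of InlineFunction now pass the CallBase by reference instead of
building a CallSite. A sketch, assuming the caller already has a call
instruction I and a function_ref-compatible assumption-cache getter:

    InlineFunctionInfo IFI(/*cg=*/nullptr, GetAssumptionCache);
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      InlineResult Res = InlineFunction(*CB, IFI);
      if (!Res.isSuccess())
        errs() << "not inlined: " << Res.getFailureReason() << "\n";
    }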
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
index becbf0e..1d9f2d1 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -22,6 +22,7 @@
 
 namespace llvm {
 
+class AllocaInst;
 class BasicBlock;
 class BlockFrequency;
 class BlockFrequencyInfo;
@@ -36,6 +37,38 @@
 class Type;
 class Value;
 
+/// A cache for the CodeExtractor analysis. The operation \ref
+/// CodeExtractor::extractCodeRegion is guaranteed not to invalidate this
+/// object. This object should conservatively be considered invalid if any
+/// other mutating operations on the IR occur.
+///
+/// Constructing this object is O(n) in the size of the function.
+class CodeExtractorAnalysisCache {
+  /// The allocas in the function.
+  SmallVector<AllocaInst *, 16> Allocas;
+
+  /// Base memory addresses of load/store instructions, grouped by block.
+  DenseMap<BasicBlock *, DenseSet<Value *>> BaseMemAddrs;
+
+  /// Blocks which contain instructions which may have unknown side-effects
+  /// on memory.
+  DenseSet<BasicBlock *> SideEffectingBlocks;
+
+  void findSideEffectInfoForBlock(BasicBlock &BB);
+
+public:
+  CodeExtractorAnalysisCache(Function &F);
+
+  /// Get the allocas in the function at the time the analysis was created.
+  /// Note that some of these allocas may no longer be present in the function,
+  /// due to \ref CodeExtractor::extractCodeRegion.
+  ArrayRef<AllocaInst *> getAllocas() const { return Allocas; }
+
+  /// Check whether \p BB contains an instruction thought to load from, store
+  /// to, or otherwise clobber the alloca \p Addr.
+  bool doesBlockContainClobberOfAddr(BasicBlock &BB, AllocaInst *Addr) const;
+};
+
   /// Utility class for extracting code into a new function.
   ///
   /// This utility provides a simple interface for extracting some sequence of
@@ -104,13 +137,23 @@
     ///
     /// Returns zero when called on a CodeExtractor instance where isEligible
     /// returns false.
-    Function *extractCodeRegion();
+    Function *extractCodeRegion(const CodeExtractorAnalysisCache &CEAC);
+
+    /// Verify that assumption cache isn't stale after a region is extracted.
+    /// Returns true when verifier finds errors. AssumptionCache is passed as
+    /// parameter to make this function stateless.
+    static bool verifyAssumptionCache(const Function &OldFunc,
+                                      const Function &NewFunc,
+                                      AssumptionCache *AC);
 
     /// Test whether this code extractor is eligible.
     ///
     /// Based on the blocks used when constructing the code extractor,
     /// determine whether it is eligible for extraction.
-    bool isEligible() const { return !Blocks.empty(); }
+    ///
+    /// Checks that varargs handling (with vastart and vaend) is only done in
+    /// the outlined blocks.
+    bool isEligible() const;
 
     /// Compute the set of input values and output values for the code.
     ///
@@ -127,7 +170,9 @@
     /// region.
     ///
     /// Returns true if it is safe to do the code motion.
-    bool isLegalToShrinkwrapLifetimeMarkers(Instruction *AllocaAddr) const;
+    bool
+    isLegalToShrinkwrapLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+                                       Instruction *AllocaAddr) const;
 
     /// Find the set of allocas whose life ranges are contained within the
     /// outlined region.
@@ -137,7 +182,8 @@
     /// are used by the lifetime markers are also candidates for shrink-
     /// wrapping. The instructions that need to be sunk are collected in
     /// 'Allocas'.
-    void findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
+    void findAllocas(const CodeExtractorAnalysisCache &CEAC,
+                     ValueSet &SinkCands, ValueSet &HoistCands,
                      BasicBlock *&ExitBlock) const;
 
     /// Find or create a block within the outline region for placing hoisted
@@ -151,6 +197,17 @@
     BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
 
   private:
+    struct LifetimeMarkerInfo {
+      bool SinkLifeStart = false;
+      bool HoistLifeEnd = false;
+      Instruction *LifeStart = nullptr;
+      Instruction *LifeEnd = nullptr;
+    };
+
+    LifetimeMarkerInfo
+    getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+                       Instruction *Addr, BasicBlock *ExitBlock) const;
+
     void severSplitPHINodesOfEntry(BasicBlock *&Header);
     void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
     void splitReturnBlocks();
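
extractCodeRegion now takes the analysis cache explicitly, so one O(n) scan
can be shared by several extractions from the same function. A sketch under
assumed inputs (Blocks is the single-entry region to outline):

    CodeExtractorAnalysisCache CEAC(F); // build once per function
    CodeExtractor CE(Blocks);
    if (CE.isEligible())
      if (Function *Outlined = CE.extractCodeRegion(CEAC))
        Outlined->setName("region.outlined"); // hypothetical naming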
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CodeMoverUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CodeMoverUtils.h
new file mode 100644
index 0000000..630f936
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CodeMoverUtils.h
@@ -0,0 +1,67 @@
+//===- Transform/Utils/CodeMoverUtils.h - CodeMover Utils -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions determines whether it is safe to move basic blocks
+// and instructions contained within a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
+
+namespace llvm {
+
+class BasicBlock;
+class DependenceInfo;
+class DominatorTree;
+class Instruction;
+class PostDominatorTree;
+
+/// Return true if \p I0 and \p I1 are control flow equivalent.
+/// Two instructions are control flow equivalent if their basic blocks are
+/// control flow equivalent.
+bool isControlFlowEquivalent(const Instruction &I0, const Instruction &I1,
+                             const DominatorTree &DT,
+                             const PostDominatorTree &PDT);
+
+/// Return true if \p BB0 and \p BB1 are control flow equivalent.
+/// Two basic blocks are control flow equivalent if, when one executes, the
+/// other is guaranteed to execute.
+bool isControlFlowEquivalent(const BasicBlock &BB0, const BasicBlock &BB1,
+                             const DominatorTree &DT,
+                             const PostDominatorTree &PDT);
+
+/// Return true if \p I can be safely moved before \p InsertPoint.
+bool isSafeToMoveBefore(Instruction &I, Instruction &InsertPoint,
+                        DominatorTree &DT,
+                        const PostDominatorTree *PDT = nullptr,
+                        DependenceInfo *DI = nullptr);
+
+/// Return true if all instructions (except the terminator) in \p BB can be
+/// safely moved before \p InsertPoint.
+bool isSafeToMoveBefore(BasicBlock &BB, Instruction &InsertPoint,
+                        DominatorTree &DT,
+                        const PostDominatorTree *PDT = nullptr,
+                        DependenceInfo *DI = nullptr);
+
+/// Move instructions, in an order-preserving manner, from \p FromBB to the
+/// beginning of \p ToBB when proven safe.
+void moveInstructionsToTheBeginning(BasicBlock &FromBB, BasicBlock &ToBB,
+                                    DominatorTree &DT,
+                                    const PostDominatorTree &PDT,
+                                    DependenceInfo &DI);
+
+/// Move instructions, in an order-preserving manner, from \p FromBB to the end
+/// of \p ToBB when proven safe.
+void moveInstructionsToTheEnd(BasicBlock &FromBB, BasicBlock &ToBB,
+                              DominatorTree &DT, const PostDominatorTree &PDT,
+                              DependenceInfo &DI);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
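
A sketch of pairing the legality query with the actual motion; DT, PDT, and
DI are assumed to be available from the enclosing pass:

    if (isSafeToMoveBefore(I, *InsertPoint, DT, &PDT, &DI))
      I.moveBefore(InsertPoint); // Instruction::moveBefore performs the move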
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Debugify.h b/linux-x64/clang/include/llvm/Transforms/Utils/Debugify.h
new file mode 100644
index 0000000..30e7d8e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Debugify.h
@@ -0,0 +1,151 @@
+//===- Debugify.h - Attach synthetic debug info to everything -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file Interface to the `debugify` synthetic debug info testing utility.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORM_UTILS_DEBUGIFY_H
+#define LLVM_TRANSFORM_UTILS_DEBUGIFY_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class DIBuilder;
+
+/// Add synthesized debug information to a module.
+///
+/// \param M The module to add debug information to.
+/// \param Functions A range of functions to add debug information to.
+/// \param Banner A prefix string to add to debug/error messages.
+/// \param ApplyToMF A callback that will add debug information to the
+///                  MachineFunction for a Function. If nullptr, then the
+///                  MachineFunction (if any) will not be modified.
+bool applyDebugifyMetadata(
+    Module &M, iterator_range<Module::iterator> Functions, StringRef Banner,
+    std::function<bool(DIBuilder &, Function &)> ApplyToMF);
+
+/// Strip out all of the metadata and debug info inserted by debugify. If no
+/// llvm.debugify module-level named metadata is present, this is a no-op.
+/// Returns true if any change was made.
+bool stripDebugifyMetadata(Module &M);
+
+llvm::ModulePass *createDebugifyModulePass();
+llvm::FunctionPass *createDebugifyFunctionPass();
+
+struct NewPMDebugifyPass : public llvm::PassInfoMixin<NewPMDebugifyPass> {
+  llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
+};
+
+/// Track how much `debugify` information has been lost.
+struct DebugifyStatistics {
+  /// Number of missing dbg.values.
+  unsigned NumDbgValuesMissing = 0;
+
+  /// Number of dbg.values expected.
+  unsigned NumDbgValuesExpected = 0;
+
+  /// Number of instructions with empty debug locations.
+  unsigned NumDbgLocsMissing = 0;
+
+  /// Number of instructions expected to have debug locations.
+  unsigned NumDbgLocsExpected = 0;
+
+  /// Get the ratio of missing/expected dbg.values.
+  float getMissingValueRatio() const {
+    return float(NumDbgValuesMissing) / float(NumDbgValuesExpected);
+  }
+
+  /// Get the ratio of missing/expected instructions with locations.
+  float getEmptyLocationRatio() const {
+    return float(NumDbgLocsMissing) / float(NumDbgLocsExpected);
+  }
+};
+
+/// Map pass names to a per-pass DebugifyStatistics instance.
+using DebugifyStatsMap = llvm::MapVector<llvm::StringRef, DebugifyStatistics>;
+
+void exportDebugifyStats(StringRef Path, const DebugifyStatsMap &Map);
+
+llvm::ModulePass *
+createCheckDebugifyModulePass(bool Strip = false,
+                              llvm::StringRef NameOfWrappedPass = "",
+                              DebugifyStatsMap *StatsMap = nullptr);
+
+llvm::FunctionPass *
+createCheckDebugifyFunctionPass(bool Strip = false,
+                                llvm::StringRef NameOfWrappedPass = "",
+                                DebugifyStatsMap *StatsMap = nullptr);
+
+struct NewPMCheckDebugifyPass
+    : public llvm::PassInfoMixin<NewPMCheckDebugifyPass> {
+  llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
+};
+
+struct DebugifyEachInstrumentation {
+  DebugifyStatsMap StatsMap;
+
+  void registerCallbacks(PassInstrumentationCallbacks &PIC);
+};
+
+/// DebugifyCustomPassManager wraps each pass with the debugify passes if
+/// needed.
+/// NOTE: We support legacy custom pass manager only.
+/// TODO: Add New PM support for custom pass manager.
+class DebugifyCustomPassManager : public legacy::PassManager {
+  DebugifyStatsMap DIStatsMap;
+  bool EnableDebugifyEach = false;
+
+public:
+  using super = legacy::PassManager;
+
+  void add(Pass *P) override {
+    // Wrap each pass with (-check)-debugify passes if requested, making
+    // exceptions for passes which shouldn't see -debugify instrumentation.
+    bool WrapWithDebugify = EnableDebugifyEach && !P->getAsImmutablePass() &&
+                            !isIRPrintingPass(P) && !isBitcodeWriterPass(P);
+    if (!WrapWithDebugify) {
+      super::add(P);
+      return;
+    }
+
+    // Apply -debugify/-check-debugify before/after each pass and collect
+    // debug info loss statistics.
+    PassKind Kind = P->getPassKind();
+    StringRef Name = P->getPassName();
+
+    // TODO: Implement Debugify for LoopPass.
+    switch (Kind) {
+    case PT_Function:
+      super::add(createDebugifyFunctionPass());
+      super::add(P);
+      super::add(createCheckDebugifyFunctionPass(true, Name, &DIStatsMap));
+      break;
+    case PT_Module:
+      super::add(createDebugifyModulePass());
+      super::add(P);
+      super::add(createCheckDebugifyModulePass(true, Name, &DIStatsMap));
+      break;
+    default:
+      super::add(P);
+      break;
+    }
+  }
+
+  void enableDebugifyEach() { EnableDebugifyEach = true; }
+
+  const DebugifyStatsMap &getDebugifyStatsMap() const { return DIStatsMap; }
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORM_UTILS_DEBUGIFY_H
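
A sketch of driving the legacy wrapper, roughly what `opt -debugify-each`
sets up; the wrapped pass and the output path are illustrative:

    DebugifyCustomPassManager PM;
    PM.enableDebugifyEach();
    PM.add(createSomeFunctionPass()); // hypothetical pass; it gets bracketed
                                      // by -debugify / -check-debugify
    PM.run(M);
    exportDebugifyStats("debugify-stats.csv", PM.getDebugifyStatsMap());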
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
index bffd65f..31034d9 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
@@ -17,8 +17,8 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
 #include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/Casting.h"
 #include <cassert>
@@ -73,15 +73,6 @@
     ValueStack.back()[V] = C;
   }
 
-  /// Given call site return callee and list of its formal arguments
-  Function *getCalleeWithFormalArgs(CallSite &CS,
-                                    SmallVector<Constant *, 8> &Formals);
-
-  /// Given call site and callee returns list of callee formal argument
-  /// values converting them when necessary
-  bool getFormalParams(CallSite &CS, Function *F,
-                       SmallVector<Constant *, 8> &Formals);
-
   /// Casts call result to a type of bitcast call expression
   Constant *castCallResultIfNeeded(Value *CallExpr, Constant *RV);
 
@@ -94,6 +85,15 @@
   }
 
 private:
+  /// Given call site return callee and list of its formal arguments
+  Function *getCalleeWithFormalArgs(CallBase &CB,
+                                    SmallVectorImpl<Constant *> &Formals);
+
+  /// Given call site and callee returns list of callee formal argument
+  /// values converting them when necessary
+  bool getFormalParams(CallBase &CB, Function *F,
+                       SmallVectorImpl<Constant *> &Formals);
+
   Constant *ComputeLoadResult(Constant *P);
 
   /// As we compute SSA register values, we store their contents here. The back
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FixIrreducible.h b/linux-x64/clang/include/llvm/Transforms/Utils/FixIrreducible.h
new file mode 100644
index 0000000..0c00b7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FixIrreducible.h
@@ -0,0 +1,20 @@
+//===- FixIrreducible.h - Convert irreducible control-flow into loops -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
+#define LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct FixIrreduciblePass : PassInfoMixin<FixIrreduciblePass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
index 4e2571b..e808a50 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -332,7 +332,7 @@
   int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
   int cmpAttrs(const AttributeList L, const AttributeList R) const;
   int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;
-  int cmpOperandBundlesSchema(const Instruction *L, const Instruction *R) const;
+  int cmpOperandBundlesSchema(const CallBase &LCS, const CallBase &RCS) const;
 
   /// Compare two GEPs for equivalent pointer arithmetic.
   /// Parts to be compared for each comparison stage,
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
index 9c2a9ea..acdd8ff 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -39,6 +39,19 @@
   /// as part of a different backend compilation process.
   bool HasExportedFunctions = false;
 
+  /// Set to true (only applicable to ELF -fpic) if dso_local should be
+  /// dropped for a declaration.
+  ///
+  /// On ELF, the assembler is conservative and assumes a global default
+  /// visibility symbol can be interposable. No direct access relocation is
+  /// allowed, if the definition is not in the translation unit, even if the
+  /// definition is available in the linkage unit. Thus we need to clear
+  /// dso_local to disable direct access.
+  ///
+  /// This flag should not be set for -fno-pic or -fpie, which would
+  /// unnecessarily disable direct access.
+  bool ClearDSOLocalOnDeclarations;
+
   /// Set of llvm.*used values, in order to validate that we don't try
   /// to promote any non-renamable values.
   SmallPtrSet<GlobalValue *, 8> Used;
@@ -49,7 +62,7 @@
   DenseMap<const Comdat *, Comdat *> RenamedComdats;
 
   /// Check if we should promote the given local value to global scope.
-  bool shouldPromoteLocalToGlobal(const GlobalValue *SGV);
+  bool shouldPromoteLocalToGlobal(const GlobalValue *SGV, ValueInfo VI);
 
 #ifndef NDEBUG
   /// Check if the given value is a local that can't be renamed (promoted).
@@ -67,11 +80,9 @@
   /// import SGV as a definition, otherwise import as a declaration.
   bool doImportAsDefinition(const GlobalValue *SGV);
 
-  /// Get the name for SGV that should be used in the linked destination
-  /// module. Specifically, this handles the case where we need to rename
-  /// a local that is being promoted to global scope, which it will always
-  /// do when \p DoPromote is true (or when importing a local).
-  std::string getName(const GlobalValue *SGV, bool DoPromote);
+  /// Get the name for a local SGV that should be promoted and renamed to global
+  /// scope in the linked destination module.
+  std::string getPromotedName(const GlobalValue *SGV);
 
   /// Process globals so that they can be used in ThinLTO. This includes
   /// promoting local variables so that they can be reference externally by
@@ -87,10 +98,11 @@
   GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);
 
 public:
-  FunctionImportGlobalProcessing(
-      Module &M, const ModuleSummaryIndex &Index,
-      SetVector<GlobalValue *> *GlobalsToImport = nullptr)
-      : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport) {
+  FunctionImportGlobalProcessing(Module &M, const ModuleSummaryIndex &Index,
+                                 SetVector<GlobalValue *> *GlobalsToImport,
+                                 bool ClearDSOLocalOnDeclarations)
+      : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport),
+        ClearDSOLocalOnDeclarations(ClearDSOLocalOnDeclarations) {
     // If we have a ModuleSummaryIndex but no function to import,
     // then this is the primary module being compiled in a ThinLTO
     // backend compilation, and we need to see if it has functions that
@@ -107,15 +119,13 @@
   }
 
   bool run();
-
-  static bool doImportAsDefinition(const GlobalValue *SGV,
-                                   SetVector<GlobalValue *> *GlobalsToImport);
 };
 
 /// Perform in-place global value handling on the given Module for
 /// exported local functions renamed and promoted for ThinLTO.
 bool renameModuleForThinLTO(
     Module &M, const ModuleSummaryIndex &Index,
+    bool ClearDSOLocalOnDeclarations,
     SetVector<GlobalValue *> *GlobalsToImport = nullptr);
 
 /// Compute synthetic function entry counts.
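
Existing callers of renameModuleForThinLTO gain the new flag in front of the
optional GlobalsToImport argument; a sketch of an updated call, with the
operands assumed to exist in the caller:

    renameModuleForThinLTO(M, Index, /*ClearDSOLocalOnDeclarations=*/true,
                           &GlobalsToImport);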
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/GuardUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/GuardUtils.h
index 3b365c5..7ab5d9e 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/GuardUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/GuardUtils.h
@@ -14,15 +14,30 @@
 
 namespace llvm {
 
+class BranchInst;
 class CallInst;
 class Function;
+class Value;
 
 /// Splits control flow at point of \p Guard, replacing it with explicit branch
 /// by the condition of guard's first argument. The taken branch then goes to
 /// the block that contains  \p Guard's successors, and the non-taken branch
 /// goes to a newly-created deopt block that contains a sole call of the
-/// deoptimize function \p DeoptIntrinsic.
-void makeGuardControlFlowExplicit(Function *DeoptIntrinsic, CallInst *Guard);
+/// deoptimize function \p DeoptIntrinsic.  If 'UseWC' is set, preserve the
+/// widenable nature of the guard by lowering to an equivalent form.  If not set,
+/// lower to a form without widenable semantics.
+void makeGuardControlFlowExplicit(Function *DeoptIntrinsic, CallInst *Guard,
+                                  bool UseWC);
+
+/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
+/// widen it such that condition 'NewCond' is also known to hold on the taken
+/// path.  Branch remains widenable after transform.
+void widenWidenableBranch(BranchInst *WidenableBR, Value *NewCond);
+
+/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
+/// *set* its condition such that (only) 'Cond' is known to hold on the taken
+/// path and that the branch remains widenable after transform.
+void setWidenableBranchCond(BranchInst *WidenableBR, Value *Cond);
 
 } // llvm
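
A sketch of widening an already-widenable branch with one more condition;
WidenableBR and NewCond are assumed to come from the caller's analysis, and
isWidenableBranch is the predicate from Analysis/GuardUtils.h:

    if (isWidenableBranch(WidenableBR))
      widenWidenableBranch(WidenableBR, NewCond); // the taken path now also
                                                  // implies NewCond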
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/InjectTLIMappings.h b/linux-x64/clang/include/llvm/Transforms/Utils/InjectTLIMappings.h
new file mode 100644
index 0000000..84e4fee
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/InjectTLIMappings.h
@@ -0,0 +1,37 @@
+//===- InjectTLIMappings.h - TLI to VFABI attribute injection -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Populates the VFABI attribute with the scalar-to-vector mappings
+// from the TargetLibraryInfo.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
+#define LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/InitializePasses.h"
+
+namespace llvm {
+class InjectTLIMappings : public PassInfoMixin<InjectTLIMappings> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+// Legacy pass
+class InjectTLIMappingsLegacy : public FunctionPass {
+public:
+  static char ID;
+  InjectTLIMappingsLegacy() : FunctionPass(ID) {
+    initializeInjectTLIMappingsLegacyPass(*PassRegistry::getPassRegistry());
+  }
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+};
+
+} // End namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
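
The legacy variant runs like any other FunctionPass; a sketch of legacy-PM
usage, with the surrounding setup assumed:

    legacy::FunctionPassManager FPM(&M);
    FPM.add(new InjectTLIMappingsLegacy());
    FPM.run(F); // adds the "vector-function-abi-variant" attribute to calls
                // that have TLI-provided vector mappings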
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/InstructionNamer.h b/linux-x64/clang/include/llvm/Transforms/Utils/InstructionNamer.h
new file mode 100644
index 0000000..4f4cc26
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/InstructionNamer.h
@@ -0,0 +1,20 @@
+//===- InstructionNamer.h - Give anonymous instructions names -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
+#define LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct InstructionNamerPass : PassInfoMixin<InstructionNamerPass> {
+  PreservedAnalyses run(Function &, FunctionAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Local.h b/linux-x64/clang/include/llvm/Transforms/Utils/Local.h
index ff516f2..ebfb62a 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/Local.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Local.h
@@ -19,35 +19,39 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/TinyPtrVector.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/DomTreeUpdater.h"
 #include "llvm/Analysis/Utils/Local.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Dominators.h"
-#include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/User.h"
 #include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
 #include <cstdint>
 #include <limits>
 
 namespace llvm {
 
+class AAResults;
 class AllocaInst;
 class AssumptionCache;
 class BasicBlock;
 class BranchInst;
+class CallBase;
 class CallInst;
+class DbgDeclareInst;
 class DbgVariableIntrinsic;
 class DbgValueInst;
 class DIBuilder;
+class DomTreeUpdater;
 class Function;
 class Instruction;
-class LazyValueInfo;
+class InvokeInst;
 class LoadInst;
 class MDNode;
 class MemorySSAUpdater;
@@ -56,57 +60,6 @@
 class TargetLibraryInfo;
 class TargetTransformInfo;
 
-/// A set of parameters used to control the transforms in the SimplifyCFG pass.
-/// Options may change depending on the position in the optimization pipeline.
-/// For example, canonical form that includes switches and branches may later be
-/// replaced by lookup tables and selects.
-struct SimplifyCFGOptions {
-  int BonusInstThreshold;
-  bool ForwardSwitchCondToPhi;
-  bool ConvertSwitchToLookupTable;
-  bool NeedCanonicalLoop;
-  bool SinkCommonInsts;
-  AssumptionCache *AC;
-
-  SimplifyCFGOptions(unsigned BonusThreshold = 1,
-                     bool ForwardSwitchCond = false,
-                     bool SwitchToLookup = false, bool CanonicalLoops = true,
-                     bool SinkCommon = false,
-                     AssumptionCache *AssumpCache = nullptr)
-      : BonusInstThreshold(BonusThreshold),
-        ForwardSwitchCondToPhi(ForwardSwitchCond),
-        ConvertSwitchToLookupTable(SwitchToLookup),
-        NeedCanonicalLoop(CanonicalLoops),
-        SinkCommonInsts(SinkCommon),
-        AC(AssumpCache) {}
-
-  // Support 'builder' pattern to set members by name at construction time.
-  SimplifyCFGOptions &bonusInstThreshold(int I) {
-    BonusInstThreshold = I;
-    return *this;
-  }
-  SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
-    ForwardSwitchCondToPhi = B;
-    return *this;
-  }
-  SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
-    ConvertSwitchToLookupTable = B;
-    return *this;
-  }
-  SimplifyCFGOptions &needCanonicalLoops(bool B) {
-    NeedCanonicalLoop = B;
-    return *this;
-  }
-  SimplifyCFGOptions &sinkCommonInsts(bool B) {
-    SinkCommonInsts = B;
-    return *this;
-  }
-  SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
-    AC = Cache;
-    return *this;
-  }
-};
-
 //===----------------------------------------------------------------------===//
 //  Local constant propagation.
 //
@@ -142,7 +95,9 @@
 /// recursively. Return true if any instructions were deleted.
 bool RecursivelyDeleteTriviallyDeadInstructions(
     Value *V, const TargetLibraryInfo *TLI = nullptr,
-    MemorySSAUpdater *MSSAU = nullptr);
+    MemorySSAUpdater *MSSAU = nullptr,
+    std::function<void(Value *)> AboutToDeleteCallback =
+        std::function<void(Value *)>());
 
 /// Delete all of the instructions in `DeadInsts`, and all other instructions
 /// that deleting these in turn causes to be trivially dead.
@@ -153,8 +108,20 @@
 /// `DeadInsts` will be used as scratch storage for this routine and will be
 /// empty afterward.
 void RecursivelyDeleteTriviallyDeadInstructions(
-    SmallVectorImpl<Instruction *> &DeadInsts,
-    const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr);
+    SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+    const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
+    std::function<void(Value *)> AboutToDeleteCallback =
+        std::function<void(Value *)>());
+
+/// Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow
+/// instructions that are not trivially dead. These will be ignored.
+/// Returns true if any changes were made, i.e. any instructions trivially dead
+/// were found and deleted.
+bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(
+    SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+    const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
+    std::function<void(Value *)> AboutToDeleteCallback =
+        std::function<void(Value *)>());
 
 /// If the specified value is an effectively dead PHI node, due to being a
 /// def-use chain of single-use nodes that either forms a cycle or is terminated
@@ -162,7 +129,8 @@
 /// operands trivially dead, delete them too, recursively. Return true if a
 /// change was made.
 bool RecursivelyDeleteDeadPHINode(PHINode *PN,
-                                  const TargetLibraryInfo *TLI = nullptr);
+                                  const TargetLibraryInfo *TLI = nullptr,
+                                  MemorySSAUpdater *MSSAU = nullptr);
 
 /// Scan the specified basic block and try to simplify any instructions in it
 /// and recursively delete dead instructions.
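
The deletion worklists above now hold WeakTrackingVH rather than
Instruction*, and the new Permissive variant tolerates entries that turn out
not to be trivially dead. A sketch of the updated idiom; the instructions and
TLI are assumed:

    SmallVector<WeakTrackingVH, 16> DeadInsts;
    DeadInsts.push_back(MaybeDeadA);
    DeadInsts.push_back(MaybeDeadB); // need not be trivially dead
    RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, TLI);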
@@ -182,20 +150,6 @@
 //  Control Flow Graph Restructuring.
 //
 
-/// Like BasicBlock::removePredecessor, this method is called when we're about
-/// to delete Pred as a predecessor of BB. If BB contains any PHI nodes, this
-/// drops the entries in the PHI nodes for Pred.
-///
-/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
-/// nodes that collapse into identity values.  For example, if we have:
-///   x = phi(1, 0, 0, 0)
-///   y = and x, z
-///
-/// .. and delete the predecessor corresponding to the '1', this will attempt to
-/// recursively fold the 'and' to 0.
-void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
-                                  DomTreeUpdater *DTU = nullptr);
-
 /// BB is a block with one predecessor and its predecessor is known to have one
 /// successor (BB!). Eliminate the edge between them, moving the instructions in
 /// the predecessor into BB. This deletes the predecessor block.
@@ -219,19 +173,23 @@
 /// It returns true if a modification was made, possibly deleting the basic
 /// block that was pointed to. LoopHeaders is an optional input parameter
 /// providing the set of loop headers that SimplifyCFG should not eliminate.
+extern cl::opt<bool> RequireAndPreserveDomTree;
 bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+                 DomTreeUpdater *DTU = nullptr,
                  const SimplifyCFGOptions &Options = {},
                  SmallPtrSetImpl<BasicBlock *> *LoopHeaders = nullptr);
 
 /// This function is used to flatten a CFG. For example, it uses parallel-and
 /// and parallel-or mode to collapse if-conditions and merge if-regions with
 /// identical statements.
-bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
+bool FlattenCFG(BasicBlock *BB, AAResults *AA = nullptr);
 
 /// If this basic block is ONLY a setcc and a branch, and if a predecessor
 /// branches to us and one of our successors, fold the setcc into the
 /// predecessor and use logical operations to pick the right destination.
-bool FoldBranchToCommonDest(BranchInst *BI, MemorySSAUpdater *MSSAU = nullptr,
+bool FoldBranchToCommonDest(BranchInst *BI, llvm::DomTreeUpdater *DTU = nullptr,
+                            MemorySSAUpdater *MSSAU = nullptr,
+                            const TargetTransformInfo *TTI = nullptr,
                             unsigned BonusInstThreshold = 1);
 
 /// This function takes a virtual register computed by an Instruction and
@@ -257,20 +215,29 @@
 /// so if alignment is important, a more reliable approach is to simply align
 /// all global variables and allocation instructions to their preferred
 /// alignment from the beginning.
-unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
-                                    const DataLayout &DL,
-                                    const Instruction *CxtI = nullptr,
-                                    AssumptionCache *AC = nullptr,
-                                    const DominatorTree *DT = nullptr);
+Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
+                                 const DataLayout &DL,
+                                 const Instruction *CxtI = nullptr,
+                                 AssumptionCache *AC = nullptr,
+                                 const DominatorTree *DT = nullptr);
 
 /// Try to infer an alignment for the specified pointer.
-inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
-                                  const Instruction *CxtI = nullptr,
-                                  AssumptionCache *AC = nullptr,
-                                  const DominatorTree *DT = nullptr) {
-  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
+inline Align getKnownAlignment(Value *V, const DataLayout &DL,
+                               const Instruction *CxtI = nullptr,
+                               AssumptionCache *AC = nullptr,
+                               const DominatorTree *DT = nullptr) {
+  return getOrEnforceKnownAlignment(V, MaybeAlign(), DL, CxtI, AC, DT);
 }
 
+/// Create a call that matches the invoke \p II in terms of arguments,
+/// attributes, debug information, etc. The call is not placed in a block and it
+/// will not have a name. The invoke instruction is not removed, nor are the
+/// uses replaced by the new call.
+CallInst *createCallMatchingInvoke(InvokeInst *II);
+
+/// This function converts the specified invoke into a normal call.
+void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);
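
A sketch of how the two helpers divide the work (hypothetical caller code; lowerDeadInvoke and cloneInvokeAsCall are illustrative names):

// One-step variant: changeToCall replaces the invoke and patches the CFG.
static void lowerDeadInvoke(llvm::InvokeInst *II, llvm::DomTreeUpdater *DTU) {
  llvm::changeToCall(II, DTU);
}

// Two-step variant: createCallMatchingInvoke only builds the call, so the
// caller is responsible for placement, use replacement, and for replacing
// the invoke terminator (e.g. with a branch to the normal destination).
static llvm::CallInst *cloneInvokeAsCall(llvm::InvokeInst *II) {
  llvm::CallInst *CI = llvm::createCallMatchingInvoke(II);
  CI->insertBefore(II);
  II->replaceAllUsesWith(CI);
  return CI;
}
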
+
 ///===---------------------------------------------------------------------===//
 ///  Dbg Intrinsic utilities
 ///
@@ -303,6 +270,10 @@
 /// dbg.addr intrinsics.
 TinyPtrVector<DbgVariableIntrinsic *> FindDbgAddrUses(Value *V);
 
+/// Like \c FindDbgAddrUses, but only returns dbg.declare intrinsics, not
+/// dbg.addr.
+TinyPtrVector<DbgDeclareInst *> FindDbgDeclareUses(Value *V);
+
 /// Finds the llvm.dbg.value intrinsics describing a value.
 void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
 
@@ -314,20 +285,9 @@
 /// additional DW_OP_deref is prepended to the expression. If Offset
 /// is non-zero, a constant displacement is added to the expression
 /// (between the optional Deref operations). Offset can be negative.
-bool replaceDbgDeclare(Value *Address, Value *NewAddress,
-                       Instruction *InsertBefore, DIBuilder &Builder,
+bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder,
                        uint8_t DIExprFlags, int Offset);
 
-/// Replaces llvm.dbg.declare instruction when the alloca it describes
-/// is replaced with a new value. If Deref is true, an additional
-/// DW_OP_deref is prepended to the expression. If Offset is non-zero,
-/// a constant displacement is added to the expression (between the
-/// optional Deref operations). Offset can be negative. The new
-/// llvm.dbg.declare is inserted immediately after AI.
-bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
-                                DIBuilder &Builder, uint8_t DIExprFlags,
-                                int Offset);
-
 /// Replaces multiple llvm.dbg.value instructions when the alloca they describe
 /// is replaced with a new value. If Offset is non-zero, a constant displacement
 /// is added to the expression (after the mandatory Deref). Offset can be
@@ -336,18 +296,17 @@
 void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                               DIBuilder &Builder, int Offset = 0);
 
-/// Finds alloca where the value comes from.
-AllocaInst *findAllocaForValue(Value *V,
-                               DenseMap<Value *, AllocaInst *> &AllocaForValue);
-
 /// Assuming the instruction \p I is going to be deleted, attempt to salvage
-/// debug users of \p I by writing the effect of \p I in a DIExpression.
-/// Returns true if any debug users were updated.
-bool salvageDebugInfo(Instruction &I);
+/// debug users of \p I by writing the effect of \p I in a DIExpression. If it
+/// cannot be salvaged, changes its debug uses to undef.
+void salvageDebugInfo(Instruction &I);
+
 
 /// Implementation of salvageDebugInfo, applying only to instructions in
-/// \p Insns, rather than all debug users of \p I.
-bool salvageDebugInfoForDbgValues(Instruction &I,
+/// \p Insns, rather than all debug users from findDbgUsers(\p I).
+/// Debug users that can be salvaged are updated in place; any that
+/// cannot be salvaged are marked undef.
+void salvageDebugInfoForDbgValues(Instruction &I,
                                   ArrayRef<DbgVariableIntrinsic *> Insns);
 
 /// Given an instruction \p I and DIExpression \p DIExpr operating on it, write
@@ -374,9 +333,13 @@
 bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint,
                            DominatorTree &DT);
 
-/// Remove all instructions from a basic block other than it's terminator
-/// and any present EH pad instructions.
-unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
+/// Remove all instructions from a basic block other than its terminator
+/// and any present EH pad instructions. Returns a pair where the first element
+/// is the number of instructions (excluding debug info intrinsics) that have
+/// been removed, and the second element is the number of debug info intrinsics
+/// that have been removed.
+std::pair<unsigned, unsigned>
+removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
 
 /// Insert an unreachable instruction before the specified
 /// instruction, making it and the rest of the code in the block dead.
@@ -403,8 +366,7 @@
 /// Remove all blocks that can not be reached from the function's entry.
 ///
 /// Returns true if any basic block was removed.
-bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr,
-                             DomTreeUpdater *DTU = nullptr,
+bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
                              MemorySSAUpdater *MSSAU = nullptr);
 
 /// Combine the metadata of two instructions so that K can replace J. Some
@@ -424,6 +386,10 @@
 void combineMetadataForCSE(Instruction *K, const Instruction *J,
                            bool DoesKMove);
 
+/// Copy the metadata from the source instruction to the destination (the
+/// replacement for the source instruction).
+void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source);
+
 /// Patch the replacement so that it is not more restrictive than the value
 /// being replaced. It assumes that the replacement does not get moved from
 /// its original position.
@@ -514,6 +480,13 @@
 /// value?
 bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
 
+//===----------------------------------------------------------------------===//
+//  Value helper functions
+//
+
+/// Invert the given true/false value, possibly reusing an existing copy.
+Value *invertCondition(Value *Condition);
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_LOCAL_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopPeel.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopPeel.h
new file mode 100644
index 0000000..8f857e1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopPeel.h
@@ -0,0 +1,40 @@
+//===- llvm/Transforms/Utils/LoopPeel.h ----- Peeling utilities -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop peeling utilities. It does not define any
+// actual pass or policy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
+#define LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+bool canPeel(Loop *L);
+
+bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
+              DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+
+TargetTransformInfo::PeelingPreferences
+gatherPeelingPreferences(Loop *L, ScalarEvolution &SE,
+                         const TargetTransformInfo &TTI,
+                         Optional<bool> UserAllowPeeling,
+                         Optional<bool> UserAllowProfileBasedPeeling,
+                         bool UnrollingSpecficValues = false);
+
+void computePeelCount(Loop *L, unsigned LoopSize,
+                      TargetTransformInfo::PeelingPreferences &PP,
+                      unsigned &TripCount, ScalarEvolution &SE,
+                      unsigned Threshold = UINT_MAX);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
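
For orientation, a sketch of how these utilities compose (hypothetical helper, assuming the analyses are already available):

#include "llvm/Transforms/Utils/LoopPeel.h"

// Hypothetical helper: peel two iterations off a loop when that is legal.
static bool peelTwoIterations(llvm::Loop *L, llvm::LoopInfo &LI,
                              llvm::ScalarEvolution &SE,
                              llvm::DominatorTree &DT,
                              llvm::AssumptionCache &AC) {
  if (!llvm::canPeel(L))
    return false;
  return llvm::peelLoop(L, /*PeelCount=*/2, &LI, &SE, &DT, &AC,
                        /*PreserveLCSSA=*/true);
}
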
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
index 2c1df79..d017fd1 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
@@ -38,14 +38,16 @@
 #ifndef LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
 #define LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
 
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
 
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
 class MemorySSAUpdater;
+class ScalarEvolution;
 
 /// This pass is responsible for loop canonicalization.
 class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
index 68bdded..940747b 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
@@ -13,42 +13,44 @@
 #ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
 #define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
 
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/DemandedBits.h"
-#include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/IVDescriptors.h"
-#include "llvm/Analysis/MustExecute.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstrTypes.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/Casting.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
 
 namespace llvm {
 
+template <typename T> class DomTreeNodeBase;
+using DomTreeNode = DomTreeNodeBase<BasicBlock>;
+class AAResults;
 class AliasSet;
 class AliasSetTracker;
 class BasicBlock;
-class DataLayout;
+class BlockFrequencyInfo;
+class IRBuilderBase;
 class Loop;
 class LoopInfo;
 class MemoryAccess;
+class MemorySSA;
 class MemorySSAUpdater;
 class OptimizationRemarkEmitter;
-class PredicatedScalarEvolution;
 class PredIteratorCache;
 class ScalarEvolution;
 class SCEV;
+class SCEVExpander;
 class TargetLibraryInfo;
-class TargetTransformInfo;
+class LPPassManager;
+class Instruction;
+struct RuntimeCheckingPtrGroup;
+typedef std::pair<const RuntimeCheckingPtrGroup *,
+                  const RuntimeCheckingPtrGroup *>
+    RuntimePointerCheck;
+
+template <typename T> class Optional;
+template <typename T, unsigned N> class SmallSetVector;
+template <typename T, unsigned N> class SmallVector;
+template <typename T> class SmallVectorImpl;
+template <typename T, unsigned N> class SmallPriorityWorklist;
 
 BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
                                    MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
@@ -72,8 +74,14 @@
 /// changes to CFG, preserved.
 ///
 /// Returns true if any modifications are made.
-bool formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
-                              DominatorTree &DT, LoopInfo &LI);
+///
+/// This function may introduce unused PHI nodes. If \p PHIsToRemove is not
+/// nullptr, those are added to it (before removing them, the caller has to
+/// check that they still have no uses). Otherwise the PHIs are directly removed.
+bool formLCSSAForInstructions(
+    SmallVectorImpl<Instruction *> &Worklist, const DominatorTree &DT,
+    const LoopInfo &LI, ScalarEvolution *SE, IRBuilderBase &Builder,
+    SmallVectorImpl<PHINode *> *PHIsToRemove = nullptr);
 
 /// Put loop into LCSSA form.
 ///
@@ -87,7 +95,8 @@
 /// If ScalarEvolution is passed in, it will be preserved.
 ///
 /// Returns true if any modifications are made to the loop.
-bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution *SE);
+bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
+               ScalarEvolution *SE);
 
 /// Put a loop nest into LCSSA form.
 ///
@@ -98,12 +107,31 @@
 /// If ScalarEvolution is passed in, it will be preserved.
 ///
 /// Returns true if any modifications are made to the loop.
-bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
+bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
                           ScalarEvolution *SE);
 
-struct SinkAndHoistLICMFlags {
-  bool NoOfMemAccTooLarge;
-  unsigned LicmMssaOptCounter;
+/// Flags controlling how much is checked when sinking or hoisting
+/// instructions.  The number of memory accesses in the loop (and whether there
+/// are too many) is determined in the constructors when using MemorySSA.
+class SinkAndHoistLICMFlags {
+public:
+  // Explicitly set limits.
+  SinkAndHoistLICMFlags(unsigned LicmMssaOptCap,
+                        unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
+                        Loop *L = nullptr, MemorySSA *MSSA = nullptr);
+  // Use default limits.
+  SinkAndHoistLICMFlags(bool IsSink, Loop *L = nullptr,
+                        MemorySSA *MSSA = nullptr);
+
+  void setIsSink(bool B) { IsSink = B; }
+  bool getIsSink() { return IsSink; }
+  bool tooManyMemoryAccesses() { return NoOfMemAccTooLarge; }
+  bool tooManyClobberingCalls() { return LicmMssaOptCounter >= LicmMssaOptCap; }
+  void incrementClobberingCalls() { ++LicmMssaOptCounter; }
+
+protected:
+  bool NoOfMemAccTooLarge = false;
+  unsigned LicmMssaOptCounter = 0;
   unsigned LicmMssaOptCap;
   unsigned LicmMssaNoAccForPromotionCap;
   bool IsSink;
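
A sketch of the two construction modes (hypothetical usage): when a Loop and MemorySSA are supplied, the constructor counts the loop's memory accesses itself and sets NoOfMemAccTooLarge accordingly.

// Hypothetical helper: build hoisting flags using the default limits.
static llvm::SinkAndHoistLICMFlags makeHoistFlags(llvm::Loop *L,
                                                  llvm::MemorySSA *MSSA) {
  return llvm::SinkAndHoistLICMFlags(/*IsSink=*/false, L, MSSA);
}
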
@@ -113,27 +141,29 @@
 /// dominated by the specified block, and that are in the current loop) in
 /// reverse depth first order w.r.t the DominatorTree. This allows us to visit
 /// uses before definitions, allowing us to sink a loop body in one pass without
-/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
-/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
+/// iteration. Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
+/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
 /// instructions of the loop and loop safety information as
 /// arguments. Diagnostics are emitted via \p ORE. It returns changed status.
-bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
-                TargetLibraryInfo *, TargetTransformInfo *, Loop *,
-                AliasSetTracker *, MemorySSAUpdater *, ICFLoopSafetyInfo *,
+bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
+                BlockFrequencyInfo *, TargetLibraryInfo *,
+                TargetTransformInfo *, Loop *, AliasSetTracker *,
+                MemorySSAUpdater *, ICFLoopSafetyInfo *,
                 SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *);
 
 /// Walk the specified region of the CFG (defined by all blocks
 /// dominated by the specified block, and that are in the current loop) in depth
 /// first order w.r.t the DominatorTree.  This allows us to visit definitions
 /// before uses, allowing us to hoist a loop body in one pass without iteration.
-/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
-/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
-/// loop and loop safety information as arguments. Diagnostics is emitted via \p
-/// ORE. It returns changed status.
-bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
-                 TargetLibraryInfo *, Loop *, AliasSetTracker *,
-                 MemorySSAUpdater *, ICFLoopSafetyInfo *,
-                 SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *);
+/// Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
+/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as arguments.
+/// Diagnostics are emitted via \p ORE. It returns changed status.
+bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
+                 BlockFrequencyInfo *, TargetLibraryInfo *, Loop *,
+                 AliasSetTracker *, MemorySSAUpdater *, ScalarEvolution *,
+                 ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &,
+                 OptimizationRemarkEmitter *);
 
 /// This function deletes dead loops. The caller of this function needs to
 /// guarantee that the loop is in fact dead.
@@ -142,12 +172,18 @@
 ///   - The loop needs to have a Preheader
 ///   - A unique dedicated exit block must exist
 ///
-/// This also updates the relevant analysis information in \p DT, \p SE, and \p
-/// LI if pointers to those are provided.
+/// This also updates the relevant analysis information in \p DT, \p SE, \p LI
+/// and \p MSSA if pointers to those are provided.
 /// It also updates the loop PM if an updater struct is provided.
 
 void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
-                    LoopInfo *LI);
+                    LoopInfo *LI, MemorySSA *MSSA = nullptr);
+
+/// Remove the backedge of the specified loop.  Handles loop nests and general
+/// loop structures subject to the precondition that the loop has no parent
+/// loop and has a single latch block.  Preserves all listed analyses.
+void breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
+                       LoopInfo &LI, MemorySSA *MSSA);
 
 /// Try to promote memory values to scalars by sinking stores out of
 /// the loop and moving loads to before the loop.  We do this by looping over
@@ -183,6 +219,13 @@
 /// Find named metadata for a loop with an integer value.
 llvm::Optional<int> getOptionalIntLoopAttribute(Loop *TheLoop, StringRef Name);
 
+/// Find a combination of metadata ("llvm.loop.vectorize.width" and
+/// "llvm.loop.vectorize.scalable.enable") for a loop and use it to construct a
+/// ElementCount. If the metadata "llvm.loop.vectorize.width" cannot be found
+/// then None is returned.
+Optional<ElementCount>
+getOptionalElementCountLoopAttribute(Loop *TheLoop);
+
 /// Create a new loop identifier for a loop created from a loop transformation.
 ///
 /// @param OrigLoopID The loop ID of the loop before the transformation.
@@ -215,6 +258,12 @@
 /// Look for the loop attribute that disables all transformation heuristic.
 bool hasDisableAllTransformsHint(const Loop *L);
 
+/// Look for the loop attribute that disables the LICM transformation heuristics.
+bool hasDisableLICMTransformsHint(const Loop *L);
+
+/// Look for the loop attribute that requires progress within the loop.
+bool hasMustProgress(const Loop *L);
+
 /// The mode sets how eager a transformation should be applied.
 enum TransformationMode {
   /// The pass can use heuristics to determine whether a transformation should
@@ -252,13 +301,30 @@
 /// @}
 
 /// Set input string into loop metadata by keeping other values intact.
+/// If the string is already in the loop metadata, update its value if it is
+/// different.
 void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
                              unsigned V = 0);
 
-/// Get a loop's estimated trip count based on branch weight metadata.
+/// Returns true if Name is applied to TheLoop and enabled.
+bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
+
+/// Returns a loop's estimated trip count based on branch weight metadata.
+/// In addition, if \p EstimatedLoopInvocationWeight is not null, it is
+/// initialized with the weight of the loop's latch leading to the exit.
 /// Returns 0 when the count is estimated to be 0, or None when a meaningful
 /// estimate cannot be made.
-Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
+Optional<unsigned>
+getLoopEstimatedTripCount(Loop *L,
+                          unsigned *EstimatedLoopInvocationWeight = nullptr);
+
+/// Set a loop's branch weight metadata to reflect that the loop has \p
+/// EstimatedTripCount iterations and \p EstimatedLoopInvocationWeight exits
+/// through its latch. Returns true if the metadata is successfully updated,
+/// false otherwise. Note that the loop must have a latch block that controls
+/// the loop exit in order to succeed.
+bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
+                               unsigned EstimatedLoopInvocationWeight);
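
A sketch of how the two functions round-trip (hypothetical helper):

// Hypothetical helper: halve a loop's estimated trip count in its branch
// weight metadata, preserving the estimated invocation weight.
static void halveEstimatedTripCount(llvm::Loop *L) {
  unsigned InvocationWeight = 0;
  if (llvm::Optional<unsigned> TC =
          llvm::getLoopEstimatedTripCount(L, &InvocationWeight))
    llvm::setLoopEstimatedTripCount(L, *TC / 2, InvocationWeight);
}
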
 
 /// Check that the inner loop (L) backedge count is known to be invariant on all
 /// iterations of its outer loop. If the loop has no parent, this is trivially
@@ -288,44 +354,37 @@
                         OptimizationRemarkEmitter *ORE = nullptr);
 
 /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
-Value *createMinMaxOp(IRBuilder<> &Builder,
-                      RecurrenceDescriptor::MinMaxRecurrenceKind RK,
-                      Value *Left, Value *Right);
+Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
+                      Value *Right);
 
 /// Generates an ordered vector reduction using extracts to reduce the value.
-Value *
-getOrderedReduction(IRBuilder<> &Builder, Value *Acc, Value *Src, unsigned Op,
-                    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
-                        RecurrenceDescriptor::MRK_Invalid,
-                    ArrayRef<Value *> RedOps = None);
+Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
+                           unsigned Op, RecurKind MinMaxKind = RecurKind::None,
+                           ArrayRef<Value *> RedOps = None);
 
 /// Generates a vector reduction using shufflevectors to reduce the value.
 /// Fast-math-flags are propagated using the IRBuilder's setting.
-Value *getShuffleReduction(IRBuilder<> &Builder, Value *Src, unsigned Op,
-                           RecurrenceDescriptor::MinMaxRecurrenceKind
-                               MinMaxKind = RecurrenceDescriptor::MRK_Invalid,
+Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
+                           RecurKind MinMaxKind = RecurKind::None,
                            ArrayRef<Value *> RedOps = None);
 
 /// Create a target reduction of the given vector. The reduction operation
 /// is described by the \p Opcode parameter. min/max reductions require
-/// additional information supplied in \p Flags.
+/// additional information supplied in \p RdxKind.
 /// The target is queried to determine if intrinsics or shuffle sequences are
 /// required to implement the reduction.
 /// Fast-math-flags are propagated using the IRBuilder's setting.
-Value *createSimpleTargetReduction(IRBuilder<> &B,
-                                   const TargetTransformInfo *TTI,
-                                   unsigned Opcode, Value *Src,
-                                   TargetTransformInfo::ReductionFlags Flags =
-                                       TargetTransformInfo::ReductionFlags(),
+Value *createSimpleTargetReduction(IRBuilderBase &B,
+                                   const TargetTransformInfo *TTI, Value *Src,
+                                   RecurKind RdxKind,
                                    ArrayRef<Value *> RedOps = None);
 
 /// Create a generic target reduction using a recurrence descriptor \p Desc
 /// The target is queried to determine if intrinsics or shuffle sequences are
 /// required to implement the reduction.
 /// Fast-math-flags are propagated using the RecurrenceDescriptor.
-Value *createTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
-                             RecurrenceDescriptor &Desc, Value *Src,
-                             bool NoNaN = false);
+Value *createTargetReduction(IRBuilderBase &B, const TargetTransformInfo *TTI,
+                             RecurrenceDescriptor &Desc, Value *Src);
 
 /// Get the intersection (logical and) of all of the potential IR flags
 /// of each scalar operation (VL) that will be converted into a vector (I).
@@ -351,6 +410,77 @@
 bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                        bool Signed);
 
+enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, NoHardUse, AlwaysRepl };
+
+/// If the final value of any expressions that are recurrent in the loop can
+/// be computed, substitute the exit values from the loop into any instructions
+/// outside of the loop that use the final values of the current expressions.
+/// Returns the number of loop exit values that have been replaced; the
+/// corresponding phi nodes are added to DeadInsts.
+int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
+                          ScalarEvolution *SE, const TargetTransformInfo *TTI,
+                          SCEVExpander &Rewriter, DominatorTree *DT,
+                          ReplaceExitVal ReplaceExitValue,
+                          SmallVector<WeakTrackingVH, 16> &DeadInsts);
+
+/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
+/// \p OrigLoop and the following distribution of \p OrigLoop iterations among \p
+/// UnrolledLoop and \p RemainderLoop. \p UnrolledLoop receives weights that
+/// reflect TC/UF iterations, and \p RemainderLoop receives weights that reflect
+/// the remaining TC%UF iterations.
+///
+/// Note that \p OrigLoop may be equal to either \p UnrolledLoop or \p
+/// RemainderLoop in which case weights for \p OrigLoop are updated accordingly.
+/// Note also behavior is undefined if \p UnrolledLoop and \p RemainderLoop are
+/// equal. \p UF must be greater than zero.
+/// If \p OrigLoop has no associated profile info, nothing happens.
+///
+/// This utility may be useful for optimizations such as the unroller and
+/// vectorizer, for which this is a typical transformation.
+void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
+                                  Loop *RemainderLoop, uint64_t UF);
+
+/// Utility that implements appending of loops onto a worklist given a range.
+/// We want to process loops in postorder, but the worklist is a LIFO data
+/// structure, so we append to it in *reverse* postorder.
+/// For trees, a preorder traversal is a viable reverse postorder, so we
+/// actually append using a preorder walk algorithm.
+template <typename RangeT>
+void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist<Loop *, 4> &);
+/// Utility that implements appending of loops onto a worklist given a range.
+/// It has the same behavior as appendLoopsToWorklist, but assumes the range of
+/// loops has already been reversed, so it processes loops in the given order.
+template <typename RangeT>
+void appendReversedLoopsToWorklist(RangeT &&,
+                                   SmallPriorityWorklist<Loop *, 4> &);
+
+/// Utility that implements appending of loops onto a worklist given LoopInfo.
+/// Calls the templated utility taking a Range of loops, handing it the Loops
+/// in LoopInfo, iterated in reverse. This is because the loops are stored in
+/// RPO w.r.t. the control flow graph in LoopInfo. For the purpose of unrolling,
+/// loop deletion, and LICM, we largely want to work forward across the CFG so
+/// that we visit defs before uses and can propagate simplifications from one
+/// loop nest into the next. Calls appendReversedLoopsToWorklist with the
+/// already reversed loops in LI.
+/// FIXME: Consider changing the order in LoopInfo.
+void appendLoopsToWorklist(LoopInfo &, SmallPriorityWorklist<Loop *, 4> &);
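
A sketch of the intended traversal (hypothetical helper): loops come off the worklist in postorder, children before parents.

#include "llvm/ADT/PriorityWorklist.h"

// Hypothetical helper: walk every loop in the function in postorder.
static void visitAllLoops(llvm::LoopInfo &LI) {
  llvm::SmallPriorityWorklist<llvm::Loop *, 4> Worklist;
  llvm::appendLoopsToWorklist(LI, Worklist);
  while (!Worklist.empty()) {
    llvm::Loop *L = Worklist.pop_back_val();
    (void)L; // process L here
  }
}
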
+
+/// Recursively clone the specified loop and all of its children,
+/// mapping the blocks with the specified map.
+Loop *cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
+                LoopInfo *LI, LPPassManager *LPM);
+
+/// Add code that checks at runtime if the accessed arrays in \p PointerChecks
+/// overlap.
+///
+/// Returns a pair of instructions where the first element is the first
+/// instruction generated, possibly as part of a sequence, and the second
+/// element is the final comparator value, or NULL if no check is needed.
+std::pair<Instruction *, Instruction *>
+addRuntimeChecks(Instruction *Loc, Loop *TheLoop,
+                 const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
+                 ScalarEvolution *SE);
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
index 355c4d7..4a8831e 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -15,8 +15,8 @@
 #ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
 #define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
 
-#include "llvm/Analysis/LoopAccessAnalysis.h"
 #include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/PassManager.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 
@@ -25,7 +25,12 @@
 class Loop;
 class LoopAccessInfo;
 class LoopInfo;
-class ScalarEvolution;
+struct RuntimeCheckingPtrGroup;
+typedef std::pair<const RuntimeCheckingPtrGroup *,
+                  const RuntimeCheckingPtrGroup *>
+    RuntimePointerCheck;
+
+template <typename T> class ArrayRef;
 
 /// This class emits a version of the loop where run-time checks ensure
 /// that may-alias pointers can't overlap.
@@ -38,9 +43,9 @@
   /// It uses the runtime checks provided by the user in \p Checks, rather
   /// than retaining default checks from LAI; the caller constructs the set
   /// of checks to version for and passes it in explicitly.
-  LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
-                 DominatorTree *DT, ScalarEvolution *SE,
-                 bool UseLAIChecks = true);
+  LoopVersioning(const LoopAccessInfo &LAI,
+                 ArrayRef<RuntimePointerCheck> Checks, Loop *L, LoopInfo *LI,
+                 DominatorTree *DT, ScalarEvolution *SE);
 
   /// Performs the CFG manipulation part of versioning the loop including
   /// the DominatorTree and LoopInfo updates.
@@ -70,13 +75,6 @@
   /// loop may alias (i.e. one of the memchecks failed).
   Loop *getNonVersionedLoop() { return NonVersionedLoop; }
 
-  /// Sets the runtime alias checks for versioning the loop.
-  void setAliasChecks(
-      SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks);
-
-  /// Sets the runtime SCEV checks for versioning the loop.
-  void setSCEVChecks(SCEVUnionPredicate Check);
-
   /// Annotate memory instructions in the versioned loop with no-alias
   /// metadata based on the memchecks issued.
   ///
@@ -122,22 +120,20 @@
   ValueToValueMapTy VMap;
 
   /// The set of alias checks that we are versioning for.
-  SmallVector<RuntimePointerChecking::PointerCheck, 4> AliasChecks;
+  SmallVector<RuntimePointerCheck, 4> AliasChecks;
 
   /// The set of SCEV checks that we are versioning for.
-  SCEVUnionPredicate Preds;
+  const SCEVUnionPredicate &Preds;
 
   /// Maps a pointer to the pointer checking group that the pointer
   /// belongs to.
-  DenseMap<const Value *, const RuntimePointerChecking::CheckingPtrGroup *>
-      PtrToGroup;
+  DenseMap<const Value *, const RuntimeCheckingPtrGroup *> PtrToGroup;
 
   /// The alias scope corresponding to a pointer checking group.
-  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
-      GroupToScope;
+  DenseMap<const RuntimeCheckingPtrGroup *, MDNode *> GroupToScope;
 
   /// The list of alias scopes that a pointer checking group can't alias.
-  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
+  DenseMap<const RuntimeCheckingPtrGroup *, MDNode *>
       GroupToNonAliasingScopeList;
 
   /// Analyses used.
@@ -146,6 +142,14 @@
   DominatorTree *DT;
   ScalarEvolution *SE;
 };
+
+/// Expose LoopVersioning as a pass.  Currently this is only used for
+/// unit-testing.  It adds all memchecks necessary to remove all may-aliasing
+/// array accesses from the loop.
+class LoopVersioningPass : public PassInfoMixin<LoopVersioningPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
 }
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
index 8e9d7b5..8d09560 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -23,12 +23,13 @@
 class MemSetInst;
 class TargetTransformInfo;
 class Value;
+struct Align;
 
 /// Emit a loop implementing the semantics of llvm.memcpy where the size is not
 /// a compile-time constant. Loop will be inserted at \p InsertBefore.
 void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
                                  Value *DstAddr, Value *CopyLen,
-                                 unsigned SrcAlign, unsigned DestAlign,
+                                 Align SrcAlign, Align DestAlign,
                                  bool SrcIsVolatile, bool DstIsVolatile,
                                  const TargetTransformInfo &TTI);
 
@@ -36,11 +37,10 @@
 /// compile time constant. Loop is inserted at \p InsertBefore.
 void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
                                Value *DstAddr, ConstantInt *CopyLen,
-                               unsigned SrcAlign, unsigned DestAlign,
+                               Align SrcAlign, Align DestAlign,
                                bool SrcIsVolatile, bool DstIsVolatile,
                                const TargetTransformInfo &TTI);
 
-
 /// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
 void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerSwitch.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerSwitch.h
new file mode 100644
index 0000000..9708698
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerSwitch.h
@@ -0,0 +1,26 @@
+//===- LowerSwitch.h - Eliminate Switch instructions ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The LowerSwitch transformation rewrites switch instructions with a sequence
+// of branches, which allows targets to get away with not implementing the
+// switch instruction until it is convenient.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
+#define LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct LowerSwitchPass : public PassInfoMixin<LowerSwitchPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
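
A sketch of scheduling the pass under the new pass manager (hypothetical pipeline code):

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/LowerSwitch.h"

// Hypothetical snippet: add switch lowering to a function pipeline.
static void addSwitchLowering(llvm::FunctionPassManager &FPM) {
  FPM.addPass(llvm::LowerSwitchPass());
}
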
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/MatrixUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/MatrixUtils.h
new file mode 100644
index 0000000..39a0d4b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/MatrixUtils.h
@@ -0,0 +1,94 @@
+//===- MatrixUtils.h - Utilities to lower matrix intrinsics -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities for generating tiled loops for matrix operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class DomTreeUpdater;
+class BasicBlock;
+class Value;
+class Loop;
+class LoopInfo;
+class IRBuilderBase;
+
+/// A helper struct to create IR loop nests for tiling of the following
+/// form:
+///   for CurrentColumn = 0..NumColumns
+///     for CurrentRow = 0..NumRows
+///       for CurrentInner = 0..NumInner
+struct TileInfo {
+  /// Number of rows of the matrix.
+  unsigned NumRows;
+
+  /// Number of columns of the matrix.
+  unsigned NumColumns;
+
+  /// Number of columns of the first matrix of a multiply /
+  /// number of rows of the second matrix of a multiply.
+  unsigned NumInner;
+
+  /// Number of rows/columns in a tile.
+  unsigned TileSize = -1;
+
+  /// Start row of the current tile to compute.
+  Value *CurrentRow;
+
+  /// Start column of the current tile to compute.
+  Value *CurrentCol;
+
+  /// Current tile offset during the tile computation.
+  Value *CurrentK;
+
+  /// Header of the outermost loop iterating from 0..NumColumns.
+  BasicBlock *ColumnLoopHeader = nullptr;
+
+  /// Header of the second loop iterating from 0..NumRows.
+  BasicBlock *RowLoopHeader = nullptr;
+  /// Latch of the second loop iterating from 0..NumRows.
+  BasicBlock *RowLoopLatch = nullptr;
+  /// Header of the innermost loop iterating from 0..NumInner.
+  BasicBlock *InnerLoopHeader = nullptr;
+  /// Latch of the innermost loop iterating from 0..NumInner.
+  BasicBlock *InnerLoopLatch = nullptr;
+
+  TileInfo(unsigned NumRows, unsigned NumColumns, unsigned NumInner,
+           unsigned TileSize)
+      : NumRows(NumRows), NumColumns(NumColumns), NumInner(NumInner),
+        TileSize(TileSize) {}
+
+  /// Creates an IR loop nest for tiling of the form below. Returns the block
+  /// for the inner loop body and sets {Column,Row,Inner}LoopHeader/Latch
+  /// fields.
+  ///
+  /// for CurrentColumn = 0..NumColumns
+  ///   for CurrentRow = 0..NumRows
+  ///     for CurrentInner = 0..NumInner
+  BasicBlock *CreateTiledLoops(BasicBlock *Start, BasicBlock *End,
+                               IRBuilderBase &B, DomTreeUpdater &DTU,
+                               LoopInfo &LI);
+
+private:
+  /// Creates a new loop with header, body and latch blocks that iterates from
+  /// [0, Bound). Updates \p Preheader to branch to the new header and uses \p
+  /// Exit as exit block.  Adds the new loop blocks to \p L and applies dominator
+  /// tree updates to \p DTU.
+  static BasicBlock *CreateLoop(BasicBlock *Preheader, BasicBlock *Exit,
+                                Value *Bound, Value *Step, StringRef Name,
+                                IRBuilderBase &B, DomTreeUpdater &DTU, Loop *L,
+                                LoopInfo &LI);
+};
+} // namespace llvm
+
+#endif
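
A sketch of the intended use (hypothetical helper; the tile sizes are arbitrary):

#include "llvm/Transforms/Utils/MatrixUtils.h"

// Hypothetical helper: emit a tiled 64x64x64 loop nest with 4x4 tiles.
// Returns the inner-loop body block; TI's header/latch fields are filled in.
static llvm::BasicBlock *emitTiledNest(llvm::BasicBlock *Start,
                                       llvm::BasicBlock *End,
                                       llvm::IRBuilderBase &B,
                                       llvm::DomTreeUpdater &DTU,
                                       llvm::LoopInfo &LI) {
  llvm::TileInfo TI(/*NumRows=*/64, /*NumColumns=*/64, /*NumInner=*/64,
                    /*TileSize=*/4);
  return TI.CreateTiledLoops(Start, End, B, DTU, LI);
}
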
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/MetaRenamer.h b/linux-x64/clang/include/llvm/Transforms/Utils/MetaRenamer.h
new file mode 100644
index 0000000..fff3dff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/MetaRenamer.h
@@ -0,0 +1,26 @@
+//===- MetaRenamer.h - Rename everything with metasyntactic names ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass renames everything with metasyntactic names. The intent is to use
+// this pass after bugpoint reduction to conceal the nature of the original
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_METARENAMER_H
+#define LLVM_TRANSFORMS_UTILS_METARENAMER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct MetaRenamerPass : PassInfoMixin<MetaRenamerPass> {
+  PreservedAnalyses run(Module &, ModuleAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_METARENAMER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
index c69af55..65added 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
 #define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
 
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include <utility> // for std::pair
 
@@ -23,9 +24,7 @@
 class Function;
 class FunctionCallee;
 class GlobalValue;
-class GlobalVariable;
 class Constant;
-class StringRef;
 class Value;
 class Type;
 
@@ -43,6 +42,10 @@
 FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName,
                                             ArrayRef<Type *> InitArgTypes);
 
+/// Creates a sanitizer constructor function.
+/// \return Returns a pointer to the constructor.
+Function *createSanitizerCtor(Module &M, StringRef CtorName);
+
 /// Creates a sanitizer constructor function, and calls the sanitizer's init
 /// function from it.
 /// \return Returns a pair of pointers to the constructor and init functions
@@ -108,6 +111,13 @@
 /// unique identifier for this module, so we return the empty string.
 std::string getUniqueModuleId(Module *M);
 
+class CallInst;
+namespace VFABI {
+/// Overwrite the Vector Function ABI variants attribute with the names provided
+/// in \p VariantMappings.
+void setVectorVariantNames(CallInst *CI,
+                           const SmallVector<std::string, 8> &VariantMappings);
+} // End VFABI namespace
 } // End llvm namespace
 
 #endif //  LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
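
A sketch of the new VFABI hook (hypothetical caller; the variant names are illustrative VFABI-style manglings, not taken from this patch):

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

// Hypothetical helper: record two vector variants for a call site.
static void tagVectorVariants(llvm::CallInst *CI) {
  llvm::SmallVector<std::string, 8> Variants = {"_ZGVbN2v_foo",
                                                "_ZGVdN4v_foo"};
  llvm::VFABI::setVectorVariantNames(CI, Variants);
}
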
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
index da4a5dc..c922476 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -51,49 +51,32 @@
 #define LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
 
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/ilist.h"
 #include "llvm/ADT/ilist_node.h"
-#include "llvm/ADT/iterator.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/OrderedInstructions.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/OperandTraits.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
+#include "llvm/IR/PassManager.h"
 #include "llvm/IR/Value.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
-#include "llvm/PassAnalysisSupport.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <iterator>
-#include <memory>
-#include <utility>
 
 namespace llvm {
 
+class AssumptionCache;
 class DominatorTree;
 class Function;
-class Instruction;
-class MemoryAccess;
-class LLVMContext;
+class IntrinsicInst;
 class raw_ostream;
 
 enum PredicateType { PT_Branch, PT_Assume, PT_Switch };
 
+/// Constraint for a predicate of the form "cmp Pred Op, OtherOp", where Op
+/// is the value the constraint applies to (the ssa.copy result).
+struct PredicateConstraint {
+  CmpInst::Predicate Predicate;
+  Value *OtherOp;
+};
+
 // Base class for all predicate information we provide.
 // All of our predicate information has at least a comparison.
 class PredicateBase : public ilist_node<PredicateBase> {
@@ -103,37 +86,38 @@
   // This can be used by passes, when destroying PredicateInfo, to know
   // whether they can just drop the intrinsic, or have to merge metadata.
   Value *OriginalOp;
+  // The renamed operand in the condition used for this predicate. For nested
+  // predicates, this is different from OriginalOp, which refers to the initial
+  // operand.
+  Value *RenamedOp;
+  // The condition associated with this predicate.
+  Value *Condition;
+
   PredicateBase(const PredicateBase &) = delete;
   PredicateBase &operator=(const PredicateBase &) = delete;
   PredicateBase() = delete;
   virtual ~PredicateBase() = default;
-
-protected:
-  PredicateBase(PredicateType PT, Value *Op) : Type(PT), OriginalOp(Op) {}
-};
-
-class PredicateWithCondition : public PredicateBase {
-public:
-  Value *Condition;
   static bool classof(const PredicateBase *PB) {
     return PB->Type == PT_Assume || PB->Type == PT_Branch ||
            PB->Type == PT_Switch;
   }
 
+  /// Fetch condition in the form of PredicateConstraint, if possible.
+  Optional<PredicateConstraint> getConstraint() const;
+
 protected:
-  PredicateWithCondition(PredicateType PT, Value *Op, Value *Condition)
-      : PredicateBase(PT, Op), Condition(Condition) {}
+  PredicateBase(PredicateType PT, Value *Op, Value *Condition)
+      : Type(PT), OriginalOp(Op), Condition(Condition) {}
 };
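
A sketch of consuming the new constraint interface (hypothetical caller code):

// Hypothetical helper: turn a predicate into a constraint, when possible.
static void inspectPredicate(const llvm::PredicateBase &PB) {
  if (llvm::Optional<llvm::PredicateConstraint> C = PB.getConstraint()) {
    // C->Predicate relates the ssa.copy result to C->OtherOp; for example,
    // ICMP_EQ means the copy is known equal to OtherOp in its scope.
    (void)C;
  }
}
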
 
 // Provides predicate information for assumes.  Since assumes are always true,
 // we simply provide the assume instruction, so you can tell your relative
 // position to it.
-class PredicateAssume : public PredicateWithCondition {
+class PredicateAssume : public PredicateBase {
 public:
   IntrinsicInst *AssumeInst;
   PredicateAssume(Value *Op, IntrinsicInst *AssumeInst, Value *Condition)
-      : PredicateWithCondition(PT_Assume, Op, Condition),
-        AssumeInst(AssumeInst) {}
+      : PredicateBase(PT_Assume, Op, Condition), AssumeInst(AssumeInst) {}
   PredicateAssume() = delete;
   static bool classof(const PredicateBase *PB) {
     return PB->Type == PT_Assume;
@@ -143,7 +127,7 @@
 // Mixin class for edge predicates.  The FROM block is the block where the
 // predicate originates, and the TO block is the block where the predicate is
 // valid.
-class PredicateWithEdge : public PredicateWithCondition {
+class PredicateWithEdge : public PredicateBase {
 public:
   BasicBlock *From;
   BasicBlock *To;
@@ -155,7 +139,7 @@
 protected:
   PredicateWithEdge(PredicateType PType, Value *Op, BasicBlock *From,
                     BasicBlock *To, Value *Cond)
-      : PredicateWithCondition(PType, Op, Cond), From(From), To(To) {}
+      : PredicateBase(PType, Op, Cond), From(From), To(To) {}
 };
 
 // Provides predicate information for branches.
@@ -189,26 +173,9 @@
   }
 };
 
-// This name is used in a few places, so kick it into their own namespace
-namespace PredicateInfoClasses {
-struct ValueDFS;
-}
-
 /// Encapsulates PredicateInfo, including all data associated with memory
 /// accesses.
 class PredicateInfo {
-private:
-  // Used to store information about each value we might rename.
-  struct ValueInfo {
-    // Information about each possible copy. During processing, this is each
-    // inserted info. After processing, we move the uninserted ones to the
-    // uninserted vector.
-    SmallVector<PredicateBase *, 4> Infos;
-    SmallVector<PredicateBase *, 4> UninsertedInfos;
-  };
-  // This owns the all the predicate infos in the function, placed or not.
-  iplist<PredicateBase> AllInfos;
-
 public:
   PredicateInfo(Function &, DominatorTree &, AssumptionCache &);
   ~PredicateInfo();
@@ -226,42 +193,18 @@
   // Used by PredicateInfo annotater, dumpers, and wrapper pass.
   friend class PredicateInfoAnnotatedWriter;
   friend class PredicateInfoPrinterLegacyPass;
+  friend class PredicateInfoBuilder;
 
 private:
-  void buildPredicateInfo();
-  void processAssume(IntrinsicInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
-  void processBranch(BranchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
-  void processSwitch(SwitchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
-  void renameUses(SmallPtrSetImpl<Value *> &);
-  using ValueDFS = PredicateInfoClasses::ValueDFS;
-  typedef SmallVectorImpl<ValueDFS> ValueDFSStack;
-  void convertUsesToDFSOrdered(Value *, SmallVectorImpl<ValueDFS> &);
-  Value *materializeStack(unsigned int &, ValueDFSStack &, Value *);
-  bool stackIsInScope(const ValueDFSStack &, const ValueDFS &) const;
-  void popStackUntilDFSScope(ValueDFSStack &, const ValueDFS &);
-  ValueInfo &getOrCreateValueInfo(Value *);
-  void addInfoFor(SmallPtrSetImpl<Value *> &OpsToRename, Value *Op,
-                  PredicateBase *PB);
-  const ValueInfo &getValueInfo(Value *) const;
   Function &F;
-  DominatorTree &DT;
-  AssumptionCache &AC;
-  OrderedInstructions OI;
+
+  // This owns all the predicate infos in the function, placed or not.
+  iplist<PredicateBase> AllInfos;
+
   // This maps from copy operands to Predicate Info. Note that it does not own
   // the Predicate Info; the infos are owned by the AllInfos list declared
   // above.
   DenseMap<const Value *, const PredicateBase *> PredicateMap;
-  // This stores info about each operand or comparison result we make copies
-  // of.  The real ValueInfos start at index 1, index 0 is unused so that we can
-  // more easily detect invalid indexing.
-  SmallVector<ValueInfo, 32> ValueInfos;
-  // This gives the index into the ValueInfos array for a given Value.  Because
-  // 0 is not a valid Value Info index, you can use DenseMap::lookup and tell
-  // whether it returned a valid result.
-  DenseMap<Value *, unsigned int> ValueInfoNums;
-  // The set of edges along which we can only handle phi uses, due to critical
-  // edges.
-  DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeUsesOnly;
   // The set of ssa_copy declarations we created with our custom mangling.
   SmallSet<AssertingVH<Function>, 20> CreatedDeclarations;
 };
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
index b2b4507..f827ffd 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -19,7 +19,6 @@
 template <typename T> class ArrayRef;
 class AllocaInst;
 class DominatorTree;
-class AliasSetTracker;
 class AssumptionCache;
 
 /// Return true if this alloca is legal for promotion.
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterBulk.h b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterBulk.h
index 5d17d6f..3a78e22 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterBulk.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterBulk.h
@@ -14,7 +14,6 @@
 #define LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
 
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/IR/PredIteratorCache.h"
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/linux-x64/clang/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
new file mode 100644
index 0000000..547245c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -0,0 +1,511 @@
+//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to generate code from scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+extern cl::opt<unsigned> SCEVCheapExpansionBudget;
+
+/// Return true if the given expression is safe to expand in the sense that
+/// all materialized values are safe to speculate anywhere their operands are
+/// defined.
+bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
+
+/// Return true if the given expression is safe to expand in the sense that
+/// all materialized values are defined and safe to speculate at the specified
+/// location and their operands are defined at this location.
+bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
+                      ScalarEvolution &SE);
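
A sketch tying the safety queries to the expander itself (hypothetical helper; expandCodeFor is declared further down in this header):

// Hypothetical helper: expand a SCEV at a given point only when safe.
static llvm::Value *expandIfSafe(const llvm::SCEV *S, llvm::Type *Ty,
                                 llvm::Instruction *InsertPt,
                                 llvm::ScalarEvolution &SE,
                                 const llvm::DataLayout &DL) {
  if (!llvm::isSafeToExpandAt(S, InsertPt, SE))
    return nullptr;
  llvm::SCEVExpander Expander(SE, DL, "scev.expand");
  return Expander.expandCodeFor(S, Ty, InsertPt);
}
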
+
+/// struct for holding enough information to help calculate the cost of the
+/// given SCEV when expanded into IR.
+struct SCEVOperand {
+  explicit SCEVOperand(unsigned Opc, int Idx, const SCEV *S) :
+    ParentOpcode(Opc), OperandIdx(Idx), S(S) { }
+  /// LLVM instruction opcode that uses the operand.
+  unsigned ParentOpcode;
+  /// The use index of an expanded instruction.
+  int OperandIdx;
+  /// The SCEV operand to be costed.
+  const SCEV* S;
+};
+
+/// This class uses information about analyzed scalars to rewrite expressions
+/// in canonical form.
+///
+/// Clients should create an instance of this class when rewriting is needed,
+/// and destroy it when finished to allow the release of the associated
+/// memory.
+class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
+  ScalarEvolution &SE;
+  const DataLayout &DL;
+
+  // New instructions receive a name to identify them with the current pass.
+  const char *IVName;
+
+  /// Indicates whether LCSSA phis should be created for inserted values.
+  bool PreserveLCSSA;
+
+  // InsertedExpressions caches Values for reuse, so must track RAUW.
+  DenseMap<std::pair<const SCEV *, Instruction *>, TrackingVH<Value>>
+      InsertedExpressions;
+
+  // InsertedValues only flags inserted instructions so needs no RAUW.
+  DenseSet<AssertingVH<Value>> InsertedValues;
+  DenseSet<AssertingVH<Value>> InsertedPostIncValues;
+
+  /// Keep track of the existing IR values re-used during expansion.
+  /// FIXME: Ideally re-used instructions would not be added to
+  /// InsertedValues/InsertedPostIncValues.
+  SmallPtrSet<Value *, 16> ReusedValues;
+
+  /// A memoization of the "relevant" loop for a given SCEV.
+  DenseMap<const SCEV *, const Loop *> RelevantLoops;
+
+  /// Addrecs referring to any of the given loops are expanded in post-inc
+  /// mode. For example, expanding {1,+,1}<L> in post-inc mode returns the add
+  /// instruction that adds one to the phi for {0,+,1}<L>, as opposed to a new
+  /// phi starting at 1. This is only supported in non-canonical mode.
+  PostIncLoopSet PostIncLoops;
+
+  /// When this is non-null, addrecs expanded in the loop it indicates should
+  /// be inserted with increments at IVIncInsertPos.
+  const Loop *IVIncInsertLoop;
+
+  /// When expanding addrecs in the IVIncInsertLoop loop, insert the IV
+  /// increment at this position.
+  Instruction *IVIncInsertPos;
+
+  /// Phis that complete an IV chain. Reuse
+  DenseSet<AssertingVH<PHINode>> ChainedPhis;
+
+  /// When true, SCEVExpander tries to expand expressions in "canonical" form.
+  /// When false, expressions are expanded in a more literal form.
+  ///
+  /// In "canonical" form addrecs are expanded as arithmetic based on a
+  /// canonical induction variable. Note that CanonicalMode doesn't guarantee
+  /// that all expressions are expanded in "canonical" form. For some
+  /// expressions literal mode can be preferred.
+  bool CanonicalMode;
+
+  /// When invoked from LSR, the expander is in "strength reduction" mode. The
+  /// only difference is that phis are only reused if they are already in
+  /// "expanded" form.
+  bool LSRMode;
+
+  typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderType;
+  BuilderType Builder;
+
+  // RAII object that stores the current insertion point and restores it when
+  // the object is destroyed. This includes the debug location.  Duplicated
+  // from InsertPointGuard to add SetInsertPoint(), which is used to update the
+  // InsertPointGuards stack when insert points are moved during SCEV
+  // expansion.
+  class SCEVInsertPointGuard {
+    IRBuilderBase &Builder;
+    AssertingVH<BasicBlock> Block;
+    BasicBlock::iterator Point;
+    DebugLoc DbgLoc;
+    SCEVExpander *SE;
+
+    SCEVInsertPointGuard(const SCEVInsertPointGuard &) = delete;
+    SCEVInsertPointGuard &operator=(const SCEVInsertPointGuard &) = delete;
+
+  public:
+    SCEVInsertPointGuard(IRBuilderBase &B, SCEVExpander *SE)
+        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
+          DbgLoc(B.getCurrentDebugLocation()), SE(SE) {
+      SE->InsertPointGuards.push_back(this);
+    }
+
+    ~SCEVInsertPointGuard() {
+      // These guards should always be created/destroyed in FIFO order since they
+      // are used to guard lexically scoped blocks of code in
+      // ScalarEvolutionExpander.
+      assert(SE->InsertPointGuards.back() == this);
+      SE->InsertPointGuards.pop_back();
+      Builder.restoreIP(IRBuilderBase::InsertPoint(Block, Point));
+      Builder.SetCurrentDebugLocation(DbgLoc);
+    }
+
+    BasicBlock::iterator GetInsertPoint() const { return Point; }
+    void SetInsertPoint(BasicBlock::iterator I) { Point = I; }
+  };
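+
+  // Illustrative sketch, not part of the upstream header: expansion code
+  // uses the guard RAII-style so that the builder's insert point and debug
+  // location survive a temporary move (Preheader below is a hypothetical
+  // block):
+  //
+  //   {
+  //     SCEVInsertPointGuard Guard(Builder, this);
+  //     Builder.SetInsertPoint(Preheader->getTerminator());
+  //     // ... emit loop-invariant code in the preheader ...
+  //   } // insert point and debug location restored here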
+
+  /// Stack of pointers to saved insert points, used to keep insert points
+  /// consistent when instructions are moved.
+  SmallVector<SCEVInsertPointGuard *, 8> InsertPointGuards;
+
+#ifndef NDEBUG
+  const char *DebugType;
+#endif
+
+  friend struct SCEVVisitor<SCEVExpander, Value *>;
+
+public:
+  /// Construct a SCEVExpander in "canonical" mode.
+  explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
+                        const char *name, bool PreserveLCSSA = true)
+      : SE(se), DL(DL), IVName(name), PreserveLCSSA(PreserveLCSSA),
+        IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr), CanonicalMode(true),
+        LSRMode(false),
+        Builder(se.getContext(), TargetFolder(DL),
+                IRBuilderCallbackInserter(
+                    [this](Instruction *I) { rememberInstruction(I); })) {
+#ifndef NDEBUG
+    DebugType = "";
+#endif
+  }
+
+  ~SCEVExpander() {
+    // Make sure the insert point guard stack is consistent.
+    assert(InsertPointGuards.empty());
+  }
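+
+  // Illustrative sketch (not upstream code; SE and F are assumed to be the
+  // caller's ScalarEvolution and Function): construct one expander per
+  // transformation and give the IVs it creates a recognizable name:
+  //
+  //   SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "indvar");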
+
+#ifndef NDEBUG
+  void setDebugType(const char *s) { DebugType = s; }
+#endif
+
+  /// Erase the contents of the InsertedExpressions map so that users trying
+  /// to expand the same expression into multiple BasicBlocks or different
+  /// places within the same BasicBlock can do so.
+  void clear() {
+    InsertedExpressions.clear();
+    InsertedValues.clear();
+    InsertedPostIncValues.clear();
+    ReusedValues.clear();
+    ChainedPhis.clear();
+  }
+
+  /// Return a vector containing all instructions inserted during expansion.
+  SmallVector<Instruction *, 32> getAllInsertedInstructions() const {
+    SmallVector<Instruction *, 32> Result;
+    for (auto &VH : InsertedValues) {
+      Value *V = VH;
+      if (ReusedValues.contains(V))
+        continue;
+      if (auto *Inst = dyn_cast<Instruction>(V))
+        Result.push_back(Inst);
+    }
+    for (auto &VH : InsertedPostIncValues) {
+      Value *V = VH;
+      if (ReusedValues.contains(V))
+        continue;
+      if (auto *Inst = dyn_cast<Instruction>(V))
+        Result.push_back(Inst);
+    }
+
+    return Result;
+  }
+
+  /// Return true for expressions that can't be evaluated at runtime
+  /// within the given \p Budget.
+  ///
+  /// \p At specifies the point in the code where the user is going to expand
+  /// this expression. Sometimes this knowledge can lead to a less pessimistic
+  /// cost estimation.
+  bool isHighCostExpansion(const SCEV *Expr, Loop *L, unsigned Budget,
+                           const TargetTransformInfo *TTI,
+                           const Instruction *At) {
+    assert(TTI && "This function requires TTI to be provided.");
+    assert(At && "This function requires At instruction to be provided.");
+    if (!TTI)      // In assert-less builds, avoid crashing
+      return true; // by always claiming to be high-cost.
+    SmallVector<SCEVOperand, 8> Worklist;
+    SmallPtrSet<const SCEV *, 8> Processed;
+    int BudgetRemaining = Budget * TargetTransformInfo::TCC_Basic;
+    Worklist.emplace_back(-1, -1, Expr);
+    while (!Worklist.empty()) {
+      const SCEVOperand WorkItem = Worklist.pop_back_val();
+      if (isHighCostExpansionHelper(WorkItem, L, *At, BudgetRemaining,
+                                    *TTI, Processed, Worklist))
+        return true;
+    }
+    assert(BudgetRemaining >= 0 && "Should have returned from inner loop.");
+    return false;
+  }
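+
+  // Illustrative sketch (hypothetical names): ask whether materializing a
+  // trip-count expression at a guard block is affordable before expanding:
+  //
+  //   if (!Expander.isHighCostExpansion(TripCount, L, /*Budget=*/4, &TTI,
+  //                                     GuardBB->getTerminator()))
+  //     Value *TC = Expander.expandCodeFor(TripCount, Ty, InsertPt);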
+
+  /// Return the induction variable increment's IV operand.
+  Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
+                               bool allowScale);
+
+  /// Utility for hoisting an IV increment.
+  bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
+
+  /// Replace congruent phis with their most canonical representative. Return
+  /// the number of phis eliminated.
+  unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+                               SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+                               const TargetTransformInfo *TTI = nullptr);
+
+  /// Insert code to directly compute the specified SCEV expression into the
+  /// program.  The code is inserted into the specified block.
+  Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I) {
+    return expandCodeForImpl(SH, Ty, I, true);
+  }
+
+  /// Insert code to directly compute the specified SCEV expression into the
+  /// program.  The code is inserted at the SCEVExpander's current insertion
+  /// point. If a type is specified, the result will be expanded to have that
+  /// type, with a cast if necessary.
+  Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr) {
+    return expandCodeForImpl(SH, Ty, true);
+  }
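+
+  // Illustrative sketch (hypothetical names): expand a backedge-taken count
+  // just before a given instruction, widening to i64 if necessary:
+  //
+  //   const SCEV *BTC = SE.getBackedgeTakenCount(L);
+  //   Value *V = Expander.expandCodeFor(BTC, Type::getInt64Ty(Ctx), InsertPt);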
+
+  /// Generates a code sequence that evaluates this predicate.  The inserted
+  /// instructions will be at position \p Loc.  The result will be of type i1
+  /// and will have a value of 0 when the predicate is false and 1 otherwise.
+  Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);
+
+  /// A specialized variant of expandCodeForPredicate, handling the case when
+  /// we are expanding code for a SCEVEqualPredicate.
+  Value *expandEqualPredicate(const SCEVEqualPredicate *Pred, Instruction *Loc);
+
+  /// Generates code that evaluates if the \p AR expression will overflow.
+  Value *generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc,
+                               bool Signed);
+
+  /// A specialized variant of expandCodeForPredicate, handling the case when
+  /// we are expanding code for a SCEVWrapPredicate.
+  Value *expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc);
+
+  /// A specialized variant of expandCodeForPredicate, handling the case when
+  /// we are expanding code for a SCEVUnionPredicate.
+  Value *expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc);
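+
+  // Illustrative sketch (hypothetical names; PSE is assumed to be a
+  // PredicatedScalarEvolution): runtime-check generation emits the combined
+  // predicate as an i1 value and branches on it to pick between the
+  // transformed and the original loop:
+  //
+  //   const SCEVUnionPredicate &Pred = PSE.getUnionPredicate();
+  //   Value *Check = Expander.expandCodeForPredicate(&Pred, Loc);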
+
+  /// Set the current IV increment loop and position.
+  void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
+    assert(!CanonicalMode &&
+           "IV increment positions are not supported in CanonicalMode");
+    IVIncInsertLoop = L;
+    IVIncInsertPos = Pos;
+  }
+
+  /// Enable post-inc expansion for addrecs referring to the given
+  /// loops. Post-inc expansion is only supported in non-canonical mode.
+  void setPostInc(const PostIncLoopSet &L) {
+    assert(!CanonicalMode &&
+           "Post-inc expansion is not supported in CanonicalMode");
+    PostIncLoops = L;
+  }
+
+  /// Disable all post-inc expansion.
+  void clearPostInc() {
+    PostIncLoops.clear();
+
+    // When we change the post-inc loop set, cached expansions may no
+    // longer be valid.
+    InsertedPostIncValues.clear();
+  }
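+
+  // Illustrative sketch: post-inc expansion is only legal outside canonical
+  // mode (see the asserts above), so a client sequences the calls as:
+  //
+  //   Expander.disableCanonicalMode();
+  //   Expander.setPostInc(Loops);  // addrecs in Loops expand post-increment
+  //   Value *V = Expander.expandCodeFor(S, Ty, InsertPt);
+  //   Expander.clearPostInc();     // drop cached post-inc expansions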
+
+  /// Disable the behavior of expanding expressions in canonical form rather
+  /// than in a more literal form. Non-canonical mode is useful for late
+  /// optimization passes.
+  void disableCanonicalMode() { CanonicalMode = false; }
+
+  void enableLSRMode() { LSRMode = true; }
+
+  /// Set the current insertion point. This is useful if multiple calls to
+  /// expandCodeFor() are going to be made with the same insert point and the
+  /// insert point may be moved during one of the expansions (e.g. if the
+  /// insert point is not a block terminator).
+  void setInsertPoint(Instruction *IP) {
+    assert(IP);
+    Builder.SetInsertPoint(IP);
+  }
+
+  /// Clear the current insertion point. This is useful if the instruction
+  /// that had been serving as the insertion point may have been deleted.
+  void clearInsertPoint() { Builder.ClearInsertionPoint(); }
+
+  /// Set location information used by debugging information.
+  void SetCurrentDebugLocation(DebugLoc L) {
+    Builder.SetCurrentDebugLocation(std::move(L));
+  }
+
+  /// Get location information used by debugging information.
+  DebugLoc getCurrentDebugLocation() const {
+    return Builder.getCurrentDebugLocation();
+  }
+
+  /// Return true if the specified instruction was inserted by the code
+  /// rewriter.  If so, the client should not modify the instruction. Note that
+  /// this also includes instructions re-used during expansion.
+  bool isInsertedInstruction(Instruction *I) const {
+    return InsertedValues.count(I) || InsertedPostIncValues.count(I);
+  }
+
+  void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
+
+  /// Try to find the ValueOffsetPair for S. The function is mainly used to
+  /// check whether S can be expanded cheaply.  If this returns a non-None
+  /// value, we know we can codegen the `ValueOffsetPair` into a suitable
+  /// expansion identical to S, so that S can be expanded cheaply.
+  ///
+  /// \p L is a hint that tells in which loop to look for the suitable value.
+  /// On success, returns a value equivalent to the expanded S at point
+  /// \p At; returns None if no such value was found.
+  ///
+  /// Note that this function does not perform an exhaustive search, i.e. if
+  /// it does not find a value, that does not mean no such value exists.
+  ///
+  Optional<ScalarEvolution::ValueOffsetPair>
+  getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
+
+  /// Returns a suitable insert point after \p I that dominates \p
+  /// MustDominate, skipping instructions inserted by the expander.
+  BasicBlock::iterator findInsertPointAfter(Instruction *I,
+                                            Instruction *MustDominate);
+
+private:
+  LLVMContext &getContext() const { return SE.getContext(); }
+
+  /// Insert code to directly compute the specified SCEV expression into the
+  /// program. The code is inserted at the SCEVExpander's current
+  /// insertion point. If a type is specified, the result will be expanded to
+  /// have that type, with a cast if necessary. If \p Root is true, this
+  /// indicates that \p SH is the top-level expression to expand passed from
+  /// an external client call.
+  Value *expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root);
+
+  /// Insert code to directly compute the specified SCEV expression into the
+  /// program. The code is inserted into the specified block. If \p
+  /// Root is true, this indicates that \p SH is the top-level expression to
+  /// expand passed from an external client call.
+  Value *expandCodeForImpl(const SCEV *SH, Type *Ty, Instruction *I, bool Root);
+
+  /// Recursive helper function for isHighCostExpansion.
+  bool isHighCostExpansionHelper(
+    const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
+    int &BudgetRemaining, const TargetTransformInfo &TTI,
+    SmallPtrSetImpl<const SCEV *> &Processed,
+    SmallVectorImpl<SCEVOperand> &Worklist);
+
+  /// Insert the specified binary operator, doing a small amount of work to
+  /// avoid inserting an obviously redundant operation, and hoisting to an
+  /// outer loop when the opportunity is there and it is safe.
+  Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
+                     SCEV::NoWrapFlags Flags, bool IsSafeToHoist);
+
+  /// Arrange for there to be a cast of V to Ty at IP, reusing an existing
+  /// cast if a suitable one exists, moving an existing cast if a suitable one
+  /// exists but isn't in the right place, or creating a new one.
+  Value *ReuseOrCreateCast(Value *V, Type *Ty, Instruction::CastOps Op,
+                           BasicBlock::iterator IP);
+
+  /// Insert a cast of V to the specified type, which must be possible with a
+  /// noop cast, doing what we can to share the casts.
+  Value *InsertNoopCastOfTo(Value *V, Type *Ty);
+
+  /// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
+  /// ptrtoint+arithmetic+inttoptr.
+  Value *expandAddToGEP(const SCEV *const *op_begin, const SCEV *const *op_end,
+                        PointerType *PTy, Type *Ty, Value *V);
+  Value *expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty, Value *V);
+
+  /// Find a previous Value in ExprValueMap for expand.
+  ScalarEvolution::ValueOffsetPair
+  FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);
+
+  Value *expand(const SCEV *S);
+
+  /// Determine the most "relevant" loop for the given SCEV.
+  const Loop *getRelevantLoop(const SCEV *);
+
+  Value *visitConstant(const SCEVConstant *S) { return S->getValue(); }
+
+  Value *visitPtrToIntExpr(const SCEVPtrToIntExpr *S);
+
+  Value *visitTruncateExpr(const SCEVTruncateExpr *S);
+
+  Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);
+
+  Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);
+
+  Value *visitAddExpr(const SCEVAddExpr *S);
+
+  Value *visitMulExpr(const SCEVMulExpr *S);
+
+  Value *visitUDivExpr(const SCEVUDivExpr *S);
+
+  Value *visitAddRecExpr(const SCEVAddRecExpr *S);
+
+  Value *visitSMaxExpr(const SCEVSMaxExpr *S);
+
+  Value *visitUMaxExpr(const SCEVUMaxExpr *S);
+
+  Value *visitSMinExpr(const SCEVSMinExpr *S);
+
+  Value *visitUMinExpr(const SCEVUMinExpr *S);
+
+  Value *visitUnknown(const SCEVUnknown *S) { return S->getValue(); }
+
+  void rememberInstruction(Value *I);
+
+  bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+  bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+  Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
+  PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
+                                     const Loop *L, Type *ExpandTy, Type *IntTy,
+                                     Type *&TruncTy, bool &InvertStep);
+  Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L, Type *ExpandTy,
+                     Type *IntTy, bool useSubtract);
+
+  void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
+                      Instruction *Pos, PHINode *LoopPhi);
+
+  void fixupInsertPoints(Instruction *I);
+
+  /// If required, create LCSSA PHIs for \p Users' operand \p OpIdx. If new
+  /// LCSSA PHIs have been created, return the LCSSA PHI available at \p User.
+  /// If no PHIs have been created, return the unchanged operand \p OpIdx.
+  Value *fixupLCSSAFormFor(Instruction *User, unsigned OpIdx);
+};
+
+/// Helper to remove instructions inserted during SCEV expansion, unless they
+/// are marked as used.
+class SCEVExpanderCleaner {
+  SCEVExpander &Expander;
+
+  DominatorTree &DT;
+
+  /// Indicates whether the result of the expansion is used. If false, the
+  /// instructions added during expansion are removed.
+  bool ResultUsed;
+
+public:
+  SCEVExpanderCleaner(SCEVExpander &Expander, DominatorTree &DT)
+      : Expander(Expander), DT(DT), ResultUsed(false) {}
+
+  ~SCEVExpanderCleaner();
+
+  /// Indicate that the result of the expansion is used.
+  void markResultUsed() { ResultUsed = true; }
+};
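+
+// Illustrative sketch (not upstream code; profitable() is hypothetical): a
+// transform that may abandon its rewrite uses the cleaner to undo
+// speculative expansion:
+//
+//   SCEVExpanderCleaner Cleaner(Expander, DT);
+//   Value *Bound = Expander.expandCodeFor(S, Ty, InsertPt);
+//   if (!profitable(Bound))
+//     return false;             // ~SCEVExpanderCleaner erases new insts
+//   Cleaner.markResultUsed();   // keep the expansion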
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyCFGOptions.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyCFGOptions.h
new file mode 100644
index 0000000..fb3a749
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyCFGOptions.h
@@ -0,0 +1,77 @@
+//===- SimplifyCFGOptions.h - Control structure for SimplifyCFG -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A set of parameters used to control the transforms in the SimplifyCFG pass.
+// Options may change depending on the position in the optimization pipeline.
+// For example, canonical form that includes switches and branches may later be
+// replaced by lookup tables and selects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
+
+namespace llvm {
+
+class AssumptionCache;
+
+struct SimplifyCFGOptions {
+  int BonusInstThreshold = 1;
+  bool ForwardSwitchCondToPhi = false;
+  bool ConvertSwitchToLookupTable = false;
+  bool NeedCanonicalLoop = true;
+  bool HoistCommonInsts = false;
+  bool SinkCommonInsts = false;
+  bool SimplifyCondBranch = true;
+  bool FoldTwoEntryPHINode = true;
+
+  AssumptionCache *AC = nullptr;
+
+  // Support 'builder' pattern to set members by name at construction time.
+  SimplifyCFGOptions &bonusInstThreshold(int I) {
+    BonusInstThreshold = I;
+    return *this;
+  }
+  SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
+    ForwardSwitchCondToPhi = B;
+    return *this;
+  }
+  SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
+    ConvertSwitchToLookupTable = B;
+    return *this;
+  }
+  SimplifyCFGOptions &needCanonicalLoops(bool B) {
+    NeedCanonicalLoop = B;
+    return *this;
+  }
+  SimplifyCFGOptions &hoistCommonInsts(bool B) {
+    HoistCommonInsts = B;
+    return *this;
+  }
+  SimplifyCFGOptions &sinkCommonInsts(bool B) {
+    SinkCommonInsts = B;
+    return *this;
+  }
+  SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
+    AC = Cache;
+    return *this;
+  }
+  SimplifyCFGOptions &setSimplifyCondBranch(bool B) {
+    SimplifyCondBranch = B;
+    return *this;
+  }
+
+  SimplifyCFGOptions &setFoldTwoEntryPHINode(bool B) {
+    FoldTwoEntryPHINode = B;
+    return *this;
+  }
+};
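+
+// Illustrative sketch (not upstream code): the setters chain, so a pipeline
+// can request late-stage simplifications in a single expression:
+//
+//   SimplifyCFGOptions Opts = SimplifyCFGOptions()
+//                                 .convertSwitchToLookupTable(true)
+//                                 .needCanonicalLoops(false)
+//                                 .hoistCommonInsts(true);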
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
index dec73ef..4ba56fb 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -15,6 +15,8 @@
 #ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
 #define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
 
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/ValueHandle.h"
 
 namespace llvm {
@@ -26,6 +28,7 @@
 class PHINode;
 class ScalarEvolution;
 class SCEVExpander;
+class TargetTransformInfo;
 
 /// Interface for visiting interesting IV users that are recognized but not
 /// simplified by this utility.
@@ -46,13 +49,36 @@
 /// simplifyUsersOfIV - Simplify instructions that use this induction variable
 /// by using ScalarEvolution to analyze the IV's recurrence.
 bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
-                       LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
+                       LoopInfo *LI, const TargetTransformInfo *TTI,
+                       SmallVectorImpl<WeakTrackingVH> &Dead,
                        SCEVExpander &Rewriter, IVVisitor *V = nullptr);
 
 /// SimplifyLoopIVs - Simplify users of induction variables within this
 /// loop. This does not actually change or add IVs.
 bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
-                     LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead);
+                     LoopInfo *LI, const TargetTransformInfo *TTI,
+                     SmallVectorImpl<WeakTrackingVH> &Dead);
+
+/// Collect information about induction variables that are used by sign/zero
+/// extend operations. This information is recorded by CollectExtend and
+/// provides the input to WidenIV.
+struct WideIVInfo {
+  PHINode *NarrowIV = nullptr;
+
+  // Widest integer type created by a [sz]ext.
+  Type *WidestNativeType = nullptr;
+
+  // Was a sext user seen before a zext?
+  bool IsSigned = false;
+};
+
+/// Widen Induction Variables - Extend the width of an IV to cover its
+/// widest uses.
+PHINode *createWideIV(const WideIVInfo &WI, LoopInfo *LI, ScalarEvolution *SE,
+                      SCEVExpander &Rewriter, DominatorTree *DT,
+                      SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+                      unsigned &NumElimExt, unsigned &NumWidened,
+                      bool HasGuards, bool UsePostIncrementRanges);
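+
+// Illustrative sketch (hypothetical names): IndVarSimplify-style callers fill
+// in a WideIVInfo from the extend users they saw, then widen in one call:
+//
+//   WideIVInfo WI;
+//   WI.NarrowIV = Phi;
+//   WI.WidestNativeType = Type::getInt64Ty(Ctx);
+//   WI.IsSigned = SawSExtFirst;
+//   SmallVector<WeakTrackingVH, 16> DeadInsts;
+//   unsigned NumElimExt = 0, NumWidened = 0;
+//   PHINode *Wide =
+//       createWideIV(WI, LI, SE, Rewriter, DT, DeadInsts, NumElimExt,
+//                    NumWidened, /*HasGuards=*/false,
+//                    /*UsePostIncrementRanges=*/true);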
 
 } // end namespace llvm
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 2572094..8703434 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -16,7 +16,6 @@
 
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/IR/IRBuilder.h"
 
 namespace llvm {
 class StringRef;
@@ -24,8 +23,7 @@
 class CallInst;
 class DataLayout;
 class Instruction;
-class TargetLibraryInfo;
-class BasicBlock;
+class IRBuilderBase;
 class Function;
 class OptimizationRemarkEmitter;
 class BlockFrequencyInfo;
@@ -50,25 +48,27 @@
   /// optimal value to replace the instruction with or 0 if a more
   /// optimal form can't be found.
   /// The call must not be an indirect call.
-  Value *optimizeCall(CallInst *CI);
+  Value *optimizeCall(CallInst *CI, IRBuilderBase &B);
 
 private:
-  Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemCpyChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemMoveChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemSetChk(CallInst *CI, IRBuilderBase &B);
 
   /// Str/Stp cpy are similar enough to be handled in the same functions.
-  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
-  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
-  Value *optimizeMemCCpyChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSNPrintfChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSPrintfChk(CallInst *CI,IRBuilder<> &B);
-  Value *optimizeStrCatChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrLCat(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrNCatChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrLCpyChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeVSNPrintfChk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeVSPrintfChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
+  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
+  Value *optimizeStrLenChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemPCpyChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemCCpyChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSNPrintfChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSPrintfChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrCatChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrLCat(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrNCatChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrLCpyChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeVSNPrintfChk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeVSPrintfChk(CallInst *CI, IRBuilderBase &B);
 
   /// Checks whether the call \p CI to a fortified libcall is foldable
   /// to the non-fortified version.
@@ -126,7 +126,13 @@
   /// Erase an instruction from its parent with our eraser.
   void eraseFromParent(Instruction *I);
 
-  Value *foldMallocMemset(CallInst *Memset, IRBuilder<> &B);
+  /// Replace an instruction with a value and erase it from its parent.
+  void substituteInParent(Instruction *I, Value *With) {
+    replaceAllUsesWith(I, With);
+    eraseFromParent(I);
+  }
+
+  Value *foldMallocMemset(CallInst *Memset, IRBuilderBase &B);
 
 public:
   LibCallSimplifier(
@@ -144,94 +150,96 @@
   /// other instructions that use the given instruction were modified
   /// and the given instruction is dead.
   /// The call must not be an indirect call.
-  Value *optimizeCall(CallInst *CI);
+  Value *optimizeCall(CallInst *CI, IRBuilderBase &B);
 
 private:
   // String and Memory Library Call Optimizations
-  Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeBCmp(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemCmpBCmpCommon(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeRealloc(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeWcslen(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCat(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrNCat(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrChr(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrRChr(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrCmp(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrNCmp(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrNDup(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStpCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrNCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrLen(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrPBrk(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrTo(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrSpn(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrCSpn(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrStr(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemChr(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemRChr(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemCmp(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeBCmp(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemCmpBCmpCommon(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemCCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemPCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemCpy(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemMove(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeMemSet(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeRealloc(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeWcslen(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeBCopy(CallInst *CI, IRBuilderBase &B);
   // Wrapper for all String/Memory Library Call Optimizations
-  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilderBase &B);
 
   // Math Library Optimizations
-  Value *optimizeCAbs(CallInst *CI, IRBuilder<> &B);
-  Value *optimizePow(CallInst *CI, IRBuilder<> &B);
-  Value *replacePowWithExp(CallInst *Pow, IRBuilder<> &B);
-  Value *replacePowWithSqrt(CallInst *Pow, IRBuilder<> &B);
-  Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeLog(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeCAbs(CallInst *CI, IRBuilderBase &B);
+  Value *optimizePow(CallInst *CI, IRBuilderBase &B);
+  Value *replacePowWithExp(CallInst *Pow, IRBuilderBase &B);
+  Value *replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B);
+  Value *optimizeExp2(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFMinFMax(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeLog(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSqrt(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSinCosPi(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeTan(CallInst *CI, IRBuilderBase &B);
   // Wrapper for all floating point library call optimizations
   Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
-                                      IRBuilder<> &B);
+                                      IRBuilderBase &B);
 
   // Integer Library Call Optimizations
-  Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFls(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeAtoi(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeStrtol(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFFS(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFls(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeAbs(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeIsDigit(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeIsAscii(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeToAscii(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeAtoi(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeStrtol(CallInst *CI, IRBuilderBase &B);
 
   // Formatting and IO Library Call Optimizations
-  Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
+  Value *optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
                                 int StreamArg = -1);
-  Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSnPrintF(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFRead(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFGets(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFPutc(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFGetc(CallInst *CI, IRBuilder<> &B);
-  Value *optimizePuts(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePrintF(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSPrintF(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSnPrintF(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFPrintF(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFWrite(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFPuts(CallInst *CI, IRBuilderBase &B);
+  Value *optimizePuts(CallInst *CI, IRBuilderBase &B);
 
   // Helper methods
-  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B);
+  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
+                          IRBuilderBase &B);
   void classifyArgUse(Value *Val, Function *F, bool IsFloat,
                       SmallVectorImpl<CallInst *> &SinCalls,
                       SmallVectorImpl<CallInst *> &CosCalls,
                       SmallVectorImpl<CallInst *> &SinCosCalls);
-  Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeSnPrintFString(CallInst *CI, IRBuilder<> &B);
-  Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePrintFString(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSPrintFString(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeSnPrintFString(CallInst *CI, IRBuilderBase &B);
+  Value *optimizeFPrintFString(CallInst *CI, IRBuilderBase &B);
 
   /// hasFloatVersion - Checks if there is a float version of the specified
   /// function by checking for an existing function with name FuncName + f
   bool hasFloatVersion(StringRef FuncName);
 
   /// Shared code to optimize strlen+wcslen.
-  Value *optimizeStringLength(CallInst *CI, IRBuilder<> &B, unsigned CharSize);
+  Value *optimizeStringLength(CallInst *CI, IRBuilderBase &B,
+                              unsigned CharSize);
 };
 } // End llvm namespace
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SizeOpts.h b/linux-x64/clang/include/llvm/Transforms/Utils/SizeOpts.h
index 1a052c6..3c1173b 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/SizeOpts.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SizeOpts.h
@@ -13,21 +13,94 @@
 #ifndef LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
 #define LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
 
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+extern llvm::cl::opt<bool> EnablePGSO;
+extern llvm::cl::opt<bool> PGSOLargeWorkingSetSizeOnly;
+extern llvm::cl::opt<bool> PGSOColdCodeOnly;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForInstrPGO;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForSamplePGO;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForPartialSamplePGO;
+extern llvm::cl::opt<bool> ForcePGSO;
+extern llvm::cl::opt<int> PgsoCutoffInstrProf;
+extern llvm::cl::opt<int> PgsoCutoffSampleProf;
+
 namespace llvm {
 
 class BasicBlock;
 class BlockFrequencyInfo;
 class Function;
-class ProfileSummaryInfo;
 
-/// Returns true if function \p F is suggested to be size-optimized base on the
+enum class PGSOQueryType {
+  IRPass, // A query call from an IR-level transform pass.
+  Test,   // A query call from a unit test.
+  Other,  // Others.
+};
+
+static inline bool isPGSOColdCodeOnly(ProfileSummaryInfo *PSI) {
+  return PGSOColdCodeOnly ||
+         (PSI->hasInstrumentationProfile() && PGSOColdCodeOnlyForInstrPGO) ||
+         (PSI->hasSampleProfile() &&
+          ((!PSI->hasPartialSampleProfile() && PGSOColdCodeOnlyForSamplePGO) ||
+           (PSI->hasPartialSampleProfile() &&
+            PGSOColdCodeOnlyForPartialSamplePGO))) ||
+         (PGSOLargeWorkingSetSizeOnly && !PSI->hasLargeWorkingSetSize());
+}
+
+template<typename AdapterT, typename FuncT, typename BFIT>
+bool shouldFuncOptimizeForSizeImpl(const FuncT *F, ProfileSummaryInfo *PSI,
+                                   BFIT *BFI, PGSOQueryType QueryType) {
+  assert(F);
+  if (!PSI || !BFI || !PSI->hasProfileSummary())
+    return false;
+  if (ForcePGSO)
+    return true;
+  if (!EnablePGSO)
+    return false;
+  if (isPGSOColdCodeOnly(PSI))
+    return AdapterT::isFunctionColdInCallGraph(F, PSI, *BFI);
+  if (PSI->hasSampleProfile())
+    // The "isCold" check seems to work better for Sample PGO as it could have
+    // many profile-unannotated functions.
+    return AdapterT::isFunctionColdInCallGraphNthPercentile(
+        PgsoCutoffSampleProf, F, PSI, *BFI);
+  return !AdapterT::isFunctionHotInCallGraphNthPercentile(PgsoCutoffInstrProf,
+                                                          F, PSI, *BFI);
+}
+
+template<typename AdapterT, typename BlockTOrBlockFreq, typename BFIT>
+bool shouldOptimizeForSizeImpl(BlockTOrBlockFreq BBOrBlockFreq, ProfileSummaryInfo *PSI,
+                               BFIT *BFI, PGSOQueryType QueryType) {
+  if (!PSI || !BFI || !PSI->hasProfileSummary())
+    return false;
+  if (ForcePGSO)
+    return true;
+  if (!EnablePGSO)
+    return false;
+  if (isPGSOColdCodeOnly(PSI))
+    return AdapterT::isColdBlock(BBOrBlockFreq, PSI, BFI);
+  if (PSI->hasSampleProfile())
+    // The "isCold" check seems to work better for Sample PGO as it could have
+    // many profile-unannotated functions.
+    return AdapterT::isColdBlockNthPercentile(PgsoCutoffSampleProf,
+                                              BBOrBlockFreq, PSI, BFI);
+  return !AdapterT::isHotBlockNthPercentile(PgsoCutoffInstrProf, BBOrBlockFreq,
+                                            PSI, BFI);
+}
+
+/// Returns true if function \p F is suggested to be size-optimized based on the
 /// profile.
-bool shouldOptimizeForSize(Function *F, ProfileSummaryInfo *PSI,
-                           BlockFrequencyInfo *BFI);
-/// Returns true if basic block \p BB is suggested to be size-optimized base
-/// on the profile.
-bool shouldOptimizeForSize(BasicBlock *BB, ProfileSummaryInfo *PSI,
-                           BlockFrequencyInfo *BFI);
+bool shouldOptimizeForSize(const Function *F, ProfileSummaryInfo *PSI,
+                           BlockFrequencyInfo *BFI,
+                           PGSOQueryType QueryType = PGSOQueryType::Other);
+
+/// Returns true if basic block \p BB is suggested to be size-optimized based on
+/// the profile.
+bool shouldOptimizeForSize(const BasicBlock *BB, ProfileSummaryInfo *PSI,
+                           BlockFrequencyInfo *BFI,
+                           PGSOQueryType QueryType = PGSOQueryType::Other);
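+
+// Illustrative sketch (hypothetical helpers): a transform consults PGSO
+// before choosing the smaller-but-slower code sequence:
+//
+//   if (shouldOptimizeForSize(BB, PSI, BFI, PGSOQueryType::IRPass))
+//     emitCompactSequence();
+//   else
+//     emitFastSequence();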
 
 } // end namespace llvm
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/StripGCRelocates.h b/linux-x64/clang/include/llvm/Transforms/Utils/StripGCRelocates.h
new file mode 100644
index 0000000..13e6d8a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/StripGCRelocates.h
@@ -0,0 +1,25 @@
+//===- StripGCRelocates.h - -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
+#define LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class StripGCRelocates : public PassInfoMixin<StripGCRelocates> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
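+
+// Illustrative sketch (not upstream code): as a new-PM pass it is scheduled
+// through a pass manager rather than created via a factory function:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(StripGCRelocates());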
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h b/linux-x64/clang/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h
new file mode 100644
index 0000000..20d0aab
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h
@@ -0,0 +1,26 @@
+//===- StripNonLineTableDebugInfo.h - -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
+#define LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+class StripNonLineTableDebugInfoPass
+    : public PassInfoMixin<StripNonLineTableDebugInfoPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index f68534e..20b3602 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -7,47 +7,39 @@
 //===----------------------------------------------------------------------===//
 //
 // This pass is used to ensure that functions have at most one return and one
-// unwind instruction in them.  Additionally, it keeps track of which node is
-// the new exit node of the CFG.  If there are no return or unwind instructions
-// in the function, the getReturnBlock/getUnwindBlock methods will return a null
-// pointer.
+// unreachable instruction in them.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
 #define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
 
+#include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
-#include "llvm/PassRegistry.h"
 
 namespace llvm {
 
-struct UnifyFunctionExitNodes : public FunctionPass {
-  BasicBlock *ReturnBlock = nullptr;
-  BasicBlock *UnwindBlock = nullptr;
-  BasicBlock *UnreachableBlock;
+class BasicBlock;
 
+class UnifyFunctionExitNodesLegacyPass : public FunctionPass {
 public:
   static char ID; // Pass identification, replacement for typeid
-  UnifyFunctionExitNodes() : FunctionPass(ID) {
-    initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
-  }
+  UnifyFunctionExitNodesLegacyPass();
 
   // We can preserve non-critical-edgeness when we unify function exit nodes
   void getAnalysisUsage(AnalysisUsage &AU) const override;
 
-  // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent)
-  // return, unwind, or unreachable  basic blocks in the CFG.
-  //
-  BasicBlock *getReturnBlock() const { return ReturnBlock; }
-  BasicBlock *getUnwindBlock() const { return UnwindBlock; }
-  BasicBlock *getUnreachableBlock() const { return UnreachableBlock; }
-
   bool runOnFunction(Function &F) override;
 };
 
 Pass *createUnifyFunctionExitNodesPass();
 
+class UnifyFunctionExitNodesPass
+    : public PassInfoMixin<UnifyFunctionExitNodesPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnifyLoopExits.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyLoopExits.h
new file mode 100644
index 0000000..0b219cd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyLoopExits.h
@@ -0,0 +1,22 @@
+//===- UnifyLoopExits.h - Redirect exiting edges to one block -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class UnifyLoopExitsPass : public PassInfoMixin<UnifyLoopExitsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h b/linux-x64/clang/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h
new file mode 100644
index 0000000..637b5d8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h
@@ -0,0 +1,31 @@
+//===-- UniqueInternalLinkageNames.h - Uniq. Int. Linkage Names -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements unique naming of internal linkage symbols with option
+// -funique-internal-linkage-symbols.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
+#define LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that provides a name to every anonymous global.
+class UniqueInternalLinkageNamesPass
+    : public PassInfoMixin<UniqueInternalLinkageNamesPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
index 593ca26..4254bd7 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -16,9 +16,7 @@
 #define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
 
 #include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Transforms/Utils/ValueMapper.h"
 
 namespace llvm {
 
@@ -33,6 +31,8 @@
 class ProfileSummaryInfo;
 class OptimizationRemarkEmitter;
 class ScalarEvolution;
+class StringRef;
+class Value;
 
 using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
 
@@ -80,50 +80,43 @@
 
 LoopUnrollResult UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
                             ScalarEvolution *SE, DominatorTree *DT,
-                            AssumptionCache *AC, OptimizationRemarkEmitter *ORE,
-                            bool PreserveLCSSA, Loop **RemainderLoop = nullptr);
+                            AssumptionCache *AC,
+                            const llvm::TargetTransformInfo *TTI,
+                            OptimizationRemarkEmitter *ORE, bool PreserveLCSSA,
+                            Loop **RemainderLoop = nullptr);
 
-bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
-                                bool AllowExpensiveTripCount,
-                                bool UseEpilogRemainder, bool UnrollRemainder,
-                                bool ForgetAllSCEV, LoopInfo *LI,
-                                ScalarEvolution *SE, DominatorTree *DT,
-                                AssumptionCache *AC, bool PreserveLCSSA,
-                                Loop **ResultLoop = nullptr);
-
-void computePeelCount(Loop *L, unsigned LoopSize,
-                      TargetTransformInfo::UnrollingPreferences &UP,
-                      unsigned &TripCount, ScalarEvolution &SE);
-
-bool canPeel(Loop *L);
-
-bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
-              DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+bool UnrollRuntimeLoopRemainder(
+    Loop *L, unsigned Count, bool AllowExpensiveTripCount,
+    bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV,
+    LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
+    const TargetTransformInfo *TTI, bool PreserveLCSSA,
+    Loop **ResultLoop = nullptr);
 
 LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
                                   unsigned TripMultiple, bool UnrollRemainder,
                                   LoopInfo *LI, ScalarEvolution *SE,
                                   DominatorTree *DT, AssumptionCache *AC,
+                                  const TargetTransformInfo *TTI,
                                   OptimizationRemarkEmitter *ORE,
                                   Loop **EpilogueLoop = nullptr);
 
 bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
-                          DependenceInfo &DI);
+                          DependenceInfo &DI, LoopInfo &LI);
 
 bool computeUnrollCount(Loop *L, const TargetTransformInfo &TTI,
                         DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
                         const SmallPtrSetImpl<const Value *> &EphValues,
                         OptimizationRemarkEmitter *ORE, unsigned &TripCount,
-                        unsigned MaxTripCount, unsigned &TripMultiple,
-                        unsigned LoopSize,
+                        unsigned MaxTripCount, bool MaxOrZero,
+                        unsigned &TripMultiple, unsigned LoopSize,
                         TargetTransformInfo::UnrollingPreferences &UP,
+                        TargetTransformInfo::PeelingPreferences &PP,
                         bool &UseUpperBound);
 
-void remapInstruction(Instruction *I, ValueToValueMapTy &VMap);
-
 void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
                              ScalarEvolution *SE, DominatorTree *DT,
-                             AssumptionCache *AC);
+                             AssumptionCache *AC,
+                             const TargetTransformInfo *TTI);
 
 MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
 
@@ -132,7 +125,7 @@
     BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, int OptLevel,
     Optional<unsigned> UserThreshold, Optional<unsigned> UserCount,
     Optional<bool> UserAllowPartial, Optional<bool> UserRuntime,
-    Optional<bool> UserUpperBound, Optional<bool> UserAllowPeeling);
+    Optional<bool> UserUpperBound, Optional<unsigned> UserFullUnrollMaxCount);
 
 unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
                              bool &NotDuplicatable, bool &Convergent,
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
index f67b9ed..1cc751d 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
@@ -20,14 +20,14 @@
 
 #ifndef LLVM_TRANSFORMS_UTILS_VNCOERCION_H
 #define LLVM_TRANSFORMS_UTILS_VNCOERCION_H
-#include "llvm/IR/IRBuilder.h"
 
 namespace llvm {
-class Function;
+class Constant;
 class StoreInst;
 class LoadInst;
 class MemIntrinsic;
 class Instruction;
+class IRBuilderBase;
 class Value;
 class Type;
 class DataLayout;
@@ -44,7 +44,7 @@
 ///
 /// If we can't do it, return null.
 Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
-                                      IRBuilder<> &IRB, const DataLayout &DL);
+                                      IRBuilderBase &IRB, const DataLayout &DL);
 
 /// This function determines whether a value for the pointer LoadPtr can be
 /// extracted from the store at DepSI.
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
index 1952a21..ff5bfc6 100644
--- a/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
@@ -22,7 +22,7 @@
 
 class Constant;
 class Function;
-class GlobalAlias;
+class GlobalIndirectSymbol;
 class GlobalVariable;
 class Instruction;
 class MDNode;
@@ -120,7 +120,7 @@
 /// instance:
 /// - \a scheduleMapGlobalInitializer()
 /// - \a scheduleMapAppendingVariable()
-/// - \a scheduleMapGlobalAliasee()
+/// - \a scheduleMapGlobalIndirectSymbol()
 /// - \a scheduleRemapFunction()
 ///
 /// Sometimes a callback needs a different mapping context.  Such a context can
@@ -180,8 +180,9 @@
                                     bool IsOldCtorDtor,
                                     ArrayRef<Constant *> NewMembers,
                                     unsigned MappingContextID = 0);
-  void scheduleMapGlobalAliasee(GlobalAlias &GA, Constant &Aliasee,
-                                unsigned MappingContextID = 0);
+  void scheduleMapGlobalIndirectSymbol(GlobalIndirectSymbol &GIS,
+                                       Constant &Target,
+                                       unsigned MappingContextID = 0);
   void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
 };
 
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
index 88a0e49..bc75142 100644
--- a/linux-x64/clang/include/llvm/Transforms/Vectorize.h
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
@@ -16,7 +16,6 @@
 
 namespace llvm {
 class BasicBlock;
-class BasicBlockPass;
 class Pass;
 
 //===----------------------------------------------------------------------===//
@@ -139,6 +138,12 @@
 //
 Pass *createLoadStoreVectorizerPass();
 
+//===----------------------------------------------------------------------===//
+//
+// Optimize partial vector operations using target cost models.
+//
+Pass *createVectorCombinePass();
+
 } // End llvm namespace
 
 #endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index b144006..2f80b43 100644
--- a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -29,22 +29,11 @@
 #include "llvm/ADT/MapVector.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
 
 namespace llvm {
 
-/// Create an analysis remark that explains why vectorization failed
-///
-/// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
-/// RemarkName is the identifier for the remark.  If \p I is passed it is an
-/// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
-/// the location of the remark.  \return the remark object that can be
-/// streamed to.
-OptimizationRemarkAnalysis createLVMissedAnalysis(const char *PassName,
-                                                  StringRef RemarkName,
-                                                  Loop *TheLoop,
-                                                  Instruction *I = nullptr);
-
 /// Utility class for getting and setting loop vectorizer hints in the form
 /// of loop metadata.
 /// This class keeps a number of loop annotations locally (as member variables)
@@ -55,7 +44,14 @@
 /// for example 'force', means a decision has been made. So, we need to be
 /// careful NOT to add them if the user hasn't specifically asked so.
 class LoopVectorizeHints {
-  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };
+  enum HintKind {
+    HK_WIDTH,
+    HK_UNROLL,
+    HK_FORCE,
+    HK_ISVECTORIZED,
+    HK_PREDICATE,
+    HK_SCALABLE
+  };
 
   /// Hint - associates name and validation with the hint value.
   struct Hint {
@@ -81,6 +77,12 @@
   /// Already Vectorized
   Hint IsVectorized;
 
+  /// Vector Predicate
+  Hint Predicate;
+
+  /// Says whether we should use fixed-width or scalable vectorization.
+  Hint Scalable;
+
   /// Return the loop metadata prefix.
   static StringRef Prefix() { return "llvm.loop."; }
 
@@ -106,9 +108,12 @@
   /// Dumps all the hint information.
   void emitRemarkWithHints() const;
 
-  unsigned getWidth() const { return Width.Value; }
+  ElementCount getWidth() const {
+    return ElementCount::get(Width.Value, isScalable());
+  }
   unsigned getInterleave() const { return Interleave.Value; }
   unsigned getIsVectorized() const { return IsVectorized.Value; }
+  unsigned getPredicate() const { return Predicate.Value; }
   enum ForceKind getForce() const {
     if ((ForceKind)Force.Value == FK_Undefined &&
         hasDisableAllTransformsHint(TheLoop))
@@ -116,6 +121,8 @@
     return (ForceKind)Force.Value;
   }
 
+  bool isScalable() const { return Scalable.Value; }
+
   /// If hints are provided that force vectorization, use the AlwaysPrint
   /// pass name to force the frontend to print the diagnostic.
   const char *vectorizeAnalysisPassName() const;
@@ -126,7 +133,9 @@
     // enabled by default because can be unsafe or inefficient. For example,
     // reordering floating-point operations will change the way round-off
     // error accumulates in the loop.
-    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
+    ElementCount EC = getWidth();
+    return getForce() == LoopVectorizeHints::FK_Enabled ||
+           EC.getKnownMinValue() > 1;
   }
 
   bool isPotentiallyUnsafe() const {
@@ -205,17 +214,18 @@
 public:
   LoopVectorizationLegality(
       Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
-      TargetTransformInfo *TTI, TargetLibraryInfo *TLI, AliasAnalysis *AA,
+      TargetTransformInfo *TTI, TargetLibraryInfo *TLI, AAResults *AA,
       Function *F, std::function<const LoopAccessInfo &(Loop &)> *GetLAA,
       LoopInfo *LI, OptimizationRemarkEmitter *ORE,
       LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB,
-      AssumptionCache *AC)
+      AssumptionCache *AC, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
       : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT),
-        GetLAA(GetLAA), ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC) {}
+        GetLAA(GetLAA), ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC),
+        BFI(BFI), PSI(PSI) {}
 
   /// ReductionList contains the reduction descriptors for all
   /// of the reductions that were found in the loop.
-  using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>;
+  using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;
 
   /// InductionList saves induction variables and maps them to the
   /// induction descriptor.
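The switch of ReductionList from DenseMap to MapVector in the hunk above trades hashing-dependent iteration order for insertion order, which keeps the vectorizer's processing of reductions deterministic across runs. A small illustrative sketch; the demo function and values are made up:

#include "llvm/ADT/MapVector.h"
#include <cstdio>

// Demo only: MapVector iterates in insertion order, unlike DenseMap,
// whose order depends on hashing and can differ from run to run.
void mapVectorOrderDemo() {
  llvm::MapVector<int, const char *> MV;
  MV.insert({3, "third"});
  MV.insert({1, "first"});
  MV.insert({2, "second"});
  for (auto &KV : MV) // prints 3, 1, 2 -- always the insertion order
    std::printf("%d -> %s\n", KV.first, KV.second);
}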
@@ -235,20 +245,21 @@
   bool canVectorize(bool UseVPlanNativePath);
 
   /// Return true if we can vectorize this loop while folding its tail by
-  /// masking.
-  bool canFoldTailByMasking();
+  /// masking, and mark all respective loads/stores for masking.
+  /// This object's state is modified iff this function returns true.
+  bool prepareToFoldTailByMasking();
 
   /// Returns the primary induction variable.
   PHINode *getPrimaryInduction() { return PrimaryInduction; }
 
   /// Returns the reduction variables found in the loop.
-  ReductionList *getReductionVars() { return &Reductions; }
+  ReductionList &getReductionVars() { return Reductions; }
 
   /// Returns the induction variables found in the loop.
-  InductionList *getInductionVars() { return &Inductions; }
+  InductionList &getInductionVars() { return Inductions; }
 
   /// Return the first-order recurrences found in the loop.
-  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
+  RecurrenceSet &getFirstOrderRecurrences() { return FirstOrderRecurrences; }
 
   /// Return the set of instructions to sink to handle first-order recurrences.
   DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
@@ -294,6 +305,19 @@
   /// Returns true if the value V is uniform within the loop.
   bool isUniform(Value *V);
 
+  /// A uniform memory op is a load or store which accesses the same memory
+  /// location on all lanes.
+  bool isUniformMemOp(Instruction &I) {
+    Value *Ptr = getLoadStorePointerOperand(&I);
+    if (!Ptr)
+      return false;
+    // Note: There's nothing inherent which prevents predicated loads and
+    // stores from being uniform.  The current lowering simply doesn't handle
+    // it; in particular, the cost model distinguishes scatter/gather from
+    // scalar w/predication, and we currently rely on the scalar path.
+    return isUniform(Ptr) && !blockNeedsPredication(I.getParent());
+  }
+
   /// Returns the information that we collected about runtime memory check.
   const RuntimePointerChecking *getRuntimePointerChecking() const {
     return LAI->getRuntimePointerChecking();
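The new isUniformMemOp() helper classifies accesses whose address is the same on every lane. In source terms, a hedged example of the pattern it targets; the function and names are illustrative only:

// Demo only: *Flag reads the same address on every iteration (every vector
// lane), so it is a uniform memory op and can stay scalar plus a broadcast;
// A[I] advances per lane and is not uniform.
int sumWithFlag(const int *A, const int *Flag, int N) {
  int Sum = 0;
  for (int I = 0; I < N; ++I)
    Sum += A[I] + *Flag;
  return Sum;
}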
@@ -301,17 +325,21 @@
 
   const LoopAccessInfo *getLAI() const { return LAI; }
 
+  bool isSafeForAnyVectorWidth() const {
+    return LAI->getDepChecker().isSafeForAnyVectorWidth();
+  }
+
   unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
 
-  uint64_t getMaxSafeRegisterWidth() const {
-    return LAI->getDepChecker().getMaxSafeRegisterWidth();
+  uint64_t getMaxSafeVectorWidthInBits() const {
+    return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
   }
 
   bool hasStride(Value *V) { return LAI->hasStride(V); }
 
   /// Returns true if vector representation of the instruction \p I
   /// requires mask.
-  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
+  bool isMaskRequired(const Instruction *I) { return MaskedOp.contains(I); }
 
   unsigned getNumStores() const { return LAI->getNumStores(); }
   unsigned getNumLoads() const { return LAI->getNumLoads(); }
@@ -319,6 +347,12 @@
   // Returns true if the NoNaN attribute is set on the function.
   bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }
 
+  /// Returns all assume calls in predicated blocks. They need to be dropped
+  /// when flattening the CFG.
+  const SmallPtrSetImpl<Instruction *> &getConditionalAssumes() const {
+    return ConditionalAssumes;
+  }
+
 private:
   /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
   /// its nested loops are considered legal for vectorization. These legal
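Why getConditionalAssumes() exists: an assume sitting under a branch states a fact that holds only when the branch is taken, so it cannot survive if-conversion. A hedged C++ sketch of the shape involved; names are illustrative, and __builtin_assume is the Clang builtin that lowers to llvm.assume:

// Demo only: the assume below is valid solely under Cond[I]. If the
// vectorizer flattens the CFG, it would execute unconditionally and
// assert X[I] != 0 even for iterations where Cond[I] is false, so such
// assumes are recorded and dropped.
void predicatedAssume(int *Out, const int *X, const bool *Cond, int N) {
  for (int I = 0; I < N; ++I) {
    if (Cond[I]) {
      __builtin_assume(X[I] != 0);
      Out[I] = 100 / X[I];
    }
  }
}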
@@ -362,9 +396,22 @@
   bool canVectorizeOuterLoop();
 
   /// Return true if all of the instructions in the block can be speculatively
-  /// executed. \p SafePtrs is a list of addresses that are known to be legal
-  /// and we know that we can read from them without segfault.
-  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
+  /// executed, and record the loads/stores that require masking. Ifs that
+  /// guard loads can be ignored under "assume safety" unless \p PreserveGuards
+  /// is true. This can happen when we introduce guards for which the original
+  /// "unguarded-loads are safe" assumption does not hold. For example, the
+  /// vectorizer's fold-tail transformation changes the loop to execute beyond
+  /// its original trip-count, under a proper guard, which should be preserved.
+  /// \p SafePtrs is a list of addresses that are known to be legal and we know
+  /// that we can read from them without segfault.
+  /// \p MaskedOp is a list of instructions that have to be transformed into
+  /// calls to the appropriate masked intrinsic when the loop is vectorized.
+  /// \p ConditionalAssumes is a list of assume instructions in predicated
+  /// blocks that must be dropped if the CFG gets flattened.
+  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
+                            SmallPtrSetImpl<const Instruction *> &MaskedOp,
+                            SmallPtrSetImpl<Instruction *> &ConditionalAssumes,
+                            bool PreserveGuards = false) const;
 
   /// Updates the vectorization state by adding \p Phi to the inductions list.
   /// This can set \p Phi as the main induction of the loop if \p Phi is a
@@ -382,14 +429,6 @@
     return LAI ? &LAI->getSymbolicStrides() : nullptr;
   }
 
-  /// Reports a vectorization illegality: print \p DebugMsg for debugging
-  /// purposes along with the corresponding optimization remark \p RemarkName.
-  /// If \p I is passed it is an instruction that prevents vectorization.
-  /// Otherwise the loop is used for the location of the remark.
-  void reportVectorizationFailure(const StringRef DebugMsg,
-      const StringRef OREMsg, const StringRef ORETag,
-      Instruction *I = nullptr) const;
-
   /// The loop that we evaluate.
   Loop *TheLoop;
 
@@ -452,8 +491,8 @@
   /// Holds the widest induction type encountered.
   Type *WidestIndTy = nullptr;
 
-  /// Allowed outside users. This holds the induction and reduction
-  /// vars which can be accessed from outside the loop.
+  /// Allowed outside users. This holds the variables that can be accessed from
+  /// outside the loop.
   SmallPtrSet<Value *, 4> AllowedExit;
 
   /// Can we assume the absence of NaNs.
@@ -476,6 +515,14 @@
   /// While vectorizing these instructions we have to generate a
   /// call to the appropriate masked intrinsic
   SmallPtrSet<const Instruction *, 8> MaskedOp;
+
+  /// Assume instructions in predicated blocks must be dropped if the CFG gets
+  /// flattened.
+  SmallPtrSet<Instruction *, 8> ConditionalAssumes;
+
+  /// BFI and PSI are used to check for profile-guided size optimizations.
+  BlockFrequencyInfo *BFI;
+  ProfileSummaryInfo *PSI;
 };
 
 } // namespace llvm
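Taken together, the getters changed above now hand out references rather than pointers. A minimal sketch of the caller-side difference, assuming a fully constructed LoopVectorizationLegality; the harness function itself is hypothetical:

#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"

// Demo only: getReductionVars() now returns the MapVector by reference
// (previously a pointer), so callers drop one dereference.
void walkReductions(llvm::LoopVectorizationLegality &LVL) {
  for (auto &Entry : LVL.getReductionVars()) {
    llvm::PHINode *Phi = Entry.first;                     // reduction phi
    const llvm::RecurrenceDescriptor &RD = Entry.second;  // its descriptor
    (void)Phi;
    (void)RD;
  }
}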
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
index d1ec06a..ecb44a7 100644
--- a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -56,12 +56,13 @@
 #ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
 #define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
 
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/CommandLine.h"
 #include <functional>
 
 namespace llvm {
 
+class AAResults;
 class AssumptionCache;
 class BlockFrequencyInfo;
 class DemandedBits;
@@ -115,8 +116,18 @@
   }
 };
 
+/// Storage for information about the changes made by the pass.
+struct LoopVectorizeResult {
+  bool MadeAnyChange;
+  bool MadeCFGChange;
+
+  LoopVectorizeResult(bool MadeAnyChange, bool MadeCFGChange)
+      : MadeAnyChange(MadeAnyChange), MadeCFGChange(MadeCFGChange) {}
+};
+
 /// The LoopVectorize Pass.
 struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
+private:
   /// If false, consider all loops for interleaving.
   /// If true, only loops that explicitly request interleaving are considered.
   bool InterleaveOnlyWhenForced;
@@ -125,9 +136,8 @@
   /// If true, only loops that explicitly request vectorization are considered.
   bool VectorizeOnlyWhenForced;
 
-  LoopVectorizePass(LoopVectorizeOptions Opts = {})
-      : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced),
-        VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced) {}
+public:
+  LoopVectorizePass(LoopVectorizeOptions Opts = {});
 
   ScalarEvolution *SE;
   LoopInfo *LI;
@@ -136,7 +146,7 @@
   BlockFrequencyInfo *BFI;
   TargetLibraryInfo *TLI;
   DemandedBits *DB;
-  AliasAnalysis *AA;
+  AAResults *AA;
   AssumptionCache *AC;
   std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
   OptimizationRemarkEmitter *ORE;
@@ -145,16 +155,25 @@
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   // Shim for old PM.
-  bool runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
-               TargetTransformInfo &TTI_, DominatorTree &DT_,
-               BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
-               DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
-               std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
-               OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_);
+  LoopVectorizeResult
+  runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
+          TargetTransformInfo &TTI_, DominatorTree &DT_,
+          BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, DemandedBits &DB_,
+          AAResults &AA_, AssumptionCache &AC_,
+          std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
+          OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_);
 
   bool processLoop(Loop *L);
 };
 
+/// Reports a vectorization failure: print \p DebugMsg for debugging
+/// purposes along with the corresponding optimization remark \p OREMsg,
+/// tagged \p ORETag. If \p I is passed, it is an instruction that prevents
+/// vectorization. Otherwise, the loop \p TheLoop locates the remark.
+void reportVectorizationFailure(const StringRef DebugMsg,
+    const StringRef OREMsg, const StringRef ORETag,
+    OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I = nullptr);
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
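A hedged sketch of how a caller might interpret the LoopVectorizeResult now returned by runImpl; the helper function is hypothetical, only the struct and its two flags come from the header above:

#include "llvm/Transforms/Vectorize/LoopVectorize.h"

// Hypothetical helper: distinguishing "anything changed" from "the CFG
// changed" lets a caller keep CFG-only analyses alive in the first case.
bool interpretResult(const llvm::LoopVectorizeResult &R) {
  if (R.MadeCFGChange)
    return true; // CFG analyses must be invalidated too
  // Changes confined to straight-line code: CFG-shape analyses still hold.
  return R.MadeAnyChange;
}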
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index ac6afb7..52a5793 100644
--- a/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -22,12 +22,11 @@
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/None.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
-#include "llvm/IR/ValueHandle.h"
 
 namespace llvm {
 
+class AAResults;
 class AssumptionCache;
 class BasicBlock;
 class CmpInst;
@@ -35,6 +34,7 @@
 class DemandedBits;
 class DominatorTree;
 class Function;
+class GetElementPtrInst;
 class InsertElementInst;
 class InsertValueInst;
 class Instruction;
@@ -55,18 +55,16 @@
 
 } // end namespace slpvectorizer
 
-extern cl::opt<bool> RunSLPVectorization;
-
 struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
   using StoreList = SmallVector<StoreInst *, 8>;
   using StoreListMap = MapVector<Value *, StoreList>;
-  using WeakTrackingVHList = SmallVector<WeakTrackingVH, 8>;
-  using WeakTrackingVHListMap = MapVector<Value *, WeakTrackingVHList>;
+  using GEPList = SmallVector<GetElementPtrInst *, 8>;
+  using GEPListMap = MapVector<Value *, GEPList>;
 
   ScalarEvolution *SE = nullptr;
   TargetTransformInfo *TTI = nullptr;
   TargetLibraryInfo *TLI = nullptr;
-  AliasAnalysis *AA = nullptr;
+  AAResults *AA = nullptr;
   LoopInfo *LI = nullptr;
   DominatorTree *DT = nullptr;
   AssumptionCache *AC = nullptr;
@@ -78,7 +76,7 @@
 
   // Glue for old PM.
   bool runImpl(Function &F, ScalarEvolution *SE_, TargetTransformInfo *TTI_,
-               TargetLibraryInfo *TLI_, AliasAnalysis *AA_, LoopInfo *LI_,
+               TargetLibraryInfo *TLI_, AAResults *AA_, LoopInfo *LI_,
                DominatorTree *DT_, AssumptionCache *AC_, DemandedBits *DB_,
                OptimizationRemarkEmitter *ORE_);
 
@@ -96,11 +94,15 @@
   bool tryToVectorizePair(Value *A, Value *B, slpvectorizer::BoUpSLP &R);
 
   /// Try to vectorize a list of operands.
-  /// \param UserCost Cost of the user operations of \p VL if they may affect
-  /// the cost of the vectorization.
+  /// When \p InsertUses is provided and its entries are non-zero, the
+  /// users of \p VL are known to be InsertElement instructions, each
+  /// associated with the same VL entry index. Their cost is then used to
+  /// adjust the cost of the vectorization, assuming the instcombine pass
+  /// later optimizes the ExtractElement-InsertElement sequence.
   /// \returns true if a value was vectorized.
   bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
-                          int UserCost = 0, bool AllowReorder = false);
+                          bool AllowReorder = false,
+                          ArrayRef<Value *> InsertUses = None);
 
   /// Try to vectorize a chain that may start at the operands of \p I.
   bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);
@@ -131,7 +133,7 @@
 
   /// Tries to vectorize constructs started from CmpInst, InsertValueInst or
   /// InsertElementInst instructions.
-  bool vectorizeSimpleInstructions(SmallVectorImpl<WeakVH> &Instructions,
+  bool vectorizeSimpleInstructions(SmallVectorImpl<Instruction *> &Instructions,
                                    BasicBlock *BB, slpvectorizer::BoUpSLP &R);
 
   /// Scan the basic block and look for patterns that are likely to start
@@ -139,7 +141,7 @@
   bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
 
   bool vectorizeStoreChain(ArrayRef<Value *> Chain, slpvectorizer::BoUpSLP &R,
-                           unsigned VecRegSize);
+                           unsigned Idx);
 
   bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);
 
@@ -147,7 +149,7 @@
   StoreListMap Stores;
 
   /// The getelementptr instructions in a basic block organized by base pointer.
-  WeakTrackingVHListMap GEPs;
+  GEPListMap GEPs;
 };
 
 } // end namespace llvm
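The GEPs member now stores plain GetElementPtrInst pointers grouped by base pointer instead of weak value handles. An illustrative sketch of populating that shape; the collector function is made up, while the type aliases mirror the header:

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using GEPList = llvm::SmallVector<llvm::GetElementPtrInst *, 8>;
using GEPListMap = llvm::MapVector<llvm::Value *, GEPList>;

// Made-up collector: bucket every GEP in a block by its base pointer,
// the shape SLPVectorizerPass::GEPs now holds.
void collectGEPs(llvm::BasicBlock &BB, GEPListMap &GEPs) {
  for (llvm::Instruction &I : BB)
    if (auto *GEP = llvm::dyn_cast<llvm::GetElementPtrInst>(&I))
      GEPs[GEP->getPointerOperand()].push_back(GEP);
}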
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/VectorCombine.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/VectorCombine.h
new file mode 100644
index 0000000..15e2331
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/VectorCombine.h
@@ -0,0 +1,30 @@
+//===-------- VectorCombine.h - Optimize partial vector operations --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass optimizes scalar/vector interactions using target cost models. The
+// transforms implemented here may not fit in traditional loop-based or SLP
+// vectorization passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTOR_VECTORCOMBINE_H
+#define LLVM_TRANSFORMS_VECTOR_VECTORCOMBINE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Optimize scalar/vector interactions in IR using target cost models.
+struct VectorCombinePass : public PassInfoMixin<VectorCombinePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+}
+#endif // LLVM_TRANSFORMS_VECTOR_VECTORCOMBINE_H
+
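VectorCombinePass follows the standard new-pass-manager shape, so wiring it into a function pipeline is a single addPass call. A minimal sketch under that assumption; the builder function is hypothetical:

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Vectorize/VectorCombine.h"

// Hypothetical builder: schedule the pass in a new-PM function pipeline.
llvm::FunctionPassManager buildPipelineSketch() {
  llvm::FunctionPassManager FPM;
  FPM.addPass(llvm::VectorCombinePass());
  return FPM;
}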