Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
new file mode 100644
index 0000000..a318d18
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h
@@ -0,0 +1,34 @@
+//===- AggressiveInstCombine.h - AggressiveInstCombine pass -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the aggressive instcombine pass.
+/// This pass is suitable for use in the new pass manager. For a pass that works
+/// with the legacy pass manager, please look for
+/// \c createAggressiveInstCombinerPass() in Scalar.h.
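+///
+/// A minimal usage sketch with the new pass manager (illustrative only;
+/// assumes a Function \c F and a configured FunctionAnalysisManager \c FAM):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(AggressiveInstCombinePass());
+///   PreservedAnalyses PA = FPM.run(F, FAM);
+/// \endcode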
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_AGGRESSIVE_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class AggressiveInstCombinePass
+    : public PassInfoMixin<AggressiveInstCombinePass> {
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Coroutines.h b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
new file mode 100644
index 0000000..51beb44
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Coroutines.h
@@ -0,0 +1,38 @@
+//===-- Coroutines.h - Coroutine Transformations ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Declare accessor functions for coroutine lowering passes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_COROUTINES_H
+#define LLVM_TRANSFORMS_COROUTINES_H
+
+namespace llvm {
+
+class Pass;
+class PassManagerBuilder;
+
+/// Add all coroutine passes to appropriate extension points.
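+/// For example (sketch; assumes the legacy PassManagerBuilder workflow):
+/// \code
+///   PassManagerBuilder Builder;
+///   addCoroutinePassesToExtensionPoints(Builder);
+///   legacy::PassManager PM;
+///   Builder.populateModulePassManager(PM);
+/// \endcode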
+void addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder);
+
+/// Lower coroutine intrinsics that are not needed by later passes.
+Pass *createCoroEarlyPass();
+
+/// Split up coroutines into multiple functions driving their state machines.
+Pass *createCoroSplitPass();
+
+/// Analyze coroutine use sites, devirtualize resume/destroy calls, and elide
+/// heap allocation for coroutine frames where possible.
+Pass *createCoroElidePass();
+
+/// Lower all remaining coroutine intrinsics.
+Pass *createCoroCleanupPass();
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO.h b/linux-x64/clang/include/llvm/Transforms/IPO.h
new file mode 100644
index 0000000..1514335
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO.h
@@ -0,0 +1,271 @@
+//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the IPO transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_H
+#define LLVM_TRANSFORMS_IPO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <functional>
+#include <vector>
+
+namespace llvm {
+
+struct InlineParams;
+class StringRef;
+class ModuleSummaryIndex;
+class ModulePass;
+class Pass;
+class Function;
+class BasicBlock;
+class GlobalValue;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes symbols from functions and modules.  If OnlyDebugInfo
+// is true, only debugging information is removed from the module.
+//
+ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false);
+
+//===----------------------------------------------------------------------===//
+//
+// This pass strips symbols from functions and modules; only debugging
+// information is left intact.
+//
+ModulePass *createStripNonDebugSymbolsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes llvm.dbg.declare intrinsics.
+ModulePass *createStripDebugDeclarePass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes unused symbols' debug info.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
+/// createConstantMergePass - This function returns a new pass that merges
+/// duplicate global constants together into a single constant that is shared.
+/// This is useful because some passes (e.g. TraceValues) insert a lot of string
+/// constants into the program, regardless of whether or not they duplicate an
+/// existing string.
+///
+ModulePass *createConstantMergePass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalOptimizerPass - This function returns a new pass that optimizes
+/// non-address taken internal globals.
+///
+ModulePass *createGlobalOptimizerPass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalDCEPass - This transform is designed to eliminate unreachable
+/// internal globals (functions or global variables)
+///
+ModulePass *createGlobalDCEPass();
+
+//===----------------------------------------------------------------------===//
+/// This transform is designed to eliminate available external globals
+/// (functions or global variables)
+///
+ModulePass *createEliminateAvailableExternallyPass();
+
+//===----------------------------------------------------------------------===//
+/// createGVExtractionPass - If deleteFn is true, this pass deletes
+/// the specified global values. Otherwise, it deletes as much of the module as
+/// possible, except for the global values specified.
+///
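+/// For example (sketch; \c F is assumed to be a \c GlobalValue* in the
+/// module): delete everything except the globals listed in \c Keep.
+/// \code
+///   std::vector<GlobalValue *> Keep = {F};
+///   PM.add(createGVExtractionPass(Keep, /*deleteFn=*/false));
+/// \endcode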
+ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
+                                   deleteFn = false);
+
+//===----------------------------------------------------------------------===//
+/// This pass performs iterative function importing from other modules.
+Pass *createFunctionImportPass();
+
+//===----------------------------------------------------------------------===//
+/// createFunctionInliningPass - Return a new pass object that uses a heuristic
+/// to inline direct function calls to small functions.
+///
+/// The Threshold can be passed directly, or asked to be computed from the
+/// given optimization and size optimization arguments.
+///
+/// The -inline-threshold command line option takes precedence over the
+/// threshold given here.
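+///
+/// For example (sketch, legacy pass manager):
+/// \code
+///   legacy::PassManager PM;
+///   PM.add(createFunctionInliningPass(/*Threshold=*/225));
+/// \endcode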
+Pass *createFunctionInliningPass();
+Pass *createFunctionInliningPass(int Threshold);
+Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel,
+                                 bool DisableInlineHotCallSite);
+Pass *createFunctionInliningPass(InlineParams &Params);
+
+//===----------------------------------------------------------------------===//
+/// createPruneEHPass - Return a new pass object which transforms invoke
+/// instructions into calls, if the callee can _not_ unwind the stack.
+///
+Pass *createPruneEHPass();
+
+//===----------------------------------------------------------------------===//
+/// createInternalizePass - This pass loops over all of the functions in the
+/// input module, internalizing all globals (functions and variables) it can.
+///
+/// Before internalizing a symbol, the callback \p MustPreserveGV is invoked and
+/// gives to the client the ability to prevent internalizing specific symbols.
+///
+/// The symbols in DSOList are internalized if it is safe to drop them from
+/// the symbol table.
+///
+/// Note that command-line options that are used with the above function are
+/// not used now!
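+///
+/// For example (sketch): internalize everything except "main".
+/// \code
+///   PM.add(createInternalizePass(
+///       [](const GlobalValue &GV) { return GV.getName() == "main"; }));
+/// \endcode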
+ModulePass *
+createInternalizePass(std::function<bool(const GlobalValue &)> MustPreserveGV);
+
+/// createInternalizePass - Same as above, but with an empty list of preserved
+/// symbols.
+ModulePass *createInternalizePass();
+
+//===----------------------------------------------------------------------===//
+/// createDeadArgEliminationPass - This pass removes arguments from functions
+/// which are not used by the body of the function.
+///
+ModulePass *createDeadArgEliminationPass();
+
+/// DeadArgHacking pass - Same as DAE, but delete arguments of external
+/// functions as well.  This is definitely not safe, and should only be used by
+/// bugpoint.
+ModulePass *createDeadArgHackingPass();
+
+//===----------------------------------------------------------------------===//
+/// createArgumentPromotionPass - This pass promotes "by reference" arguments to
+/// be passed by value if the number of elements passed is smaller than or
+/// equal to maxElements (maxElements == 0 means always promote).
+///
+Pass *createArgumentPromotionPass(unsigned maxElements = 3);
+
+//===----------------------------------------------------------------------===//
+/// createIPConstantPropagationPass - This pass propagates constants from call
+/// sites into the bodies of functions.
+///
+ModulePass *createIPConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+/// createIPSCCPPass - This pass propagates constants from call sites into the
+/// bodies of functions, and keeps track of whether basic blocks are executable
+/// in the process.
+///
+ModulePass *createIPSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+/// createLoopExtractorPass - This pass extracts all natural loops from the
+/// program into a function if it can.
+///
+Pass *createLoopExtractorPass();
+
+/// createSingleLoopExtractorPass - This pass extracts one natural loop from the
+/// program into a function if it can.  This is used by bugpoint.
+///
+Pass *createSingleLoopExtractorPass();
+
+/// createBlockExtractorPass - This pass extracts all the specified blocks
+/// from the functions in the module.
+///
+ModulePass *createBlockExtractorPass();
+ModulePass *
+createBlockExtractorPass(const SmallVectorImpl<BasicBlock *> &BlocksToExtract,
+                         bool EraseFunctions);
+
+/// createStripDeadPrototypesPass - This pass removes any function declarations
+/// (prototypes) that are not used.
+ModulePass *createStripDeadPrototypesPass();
+
+//===----------------------------------------------------------------------===//
+/// createReversePostOrderFunctionAttrsPass - This pass walks SCCs of the call
+/// graph in RPO to deduce and propagate function attributes. Currently it
+/// only handles synthesizing norecurse attributes.
+///
+Pass *createReversePostOrderFunctionAttrsPass();
+
+//===----------------------------------------------------------------------===//
+/// createMergeFunctionsPass - This pass discovers identical functions and
+/// collapses them.
+///
+ModulePass *createMergeFunctionsPass();
+
+//===----------------------------------------------------------------------===//
+/// createPartialInliningPass - This pass inlines parts of functions.
+///
+ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
+
+/// createCalledValuePropagationPass - Attach metadata to indirect call sites
+/// indicating the set of functions they may target at run-time.
+ModulePass *createCalledValuePropagationPass();
+
+/// What to do with the summary when running passes that operate on it.
+enum class PassSummaryAction {
+  None,   ///< Do nothing.
+  Import, ///< Import information from summary.
+  Export, ///< Export information to summary.
+};
+
+/// \brief This pass lowers type metadata and the llvm.type.test intrinsic to
+/// bitsets.
+///
+/// The behavior depends on the summary arguments:
+/// - If ExportSummary is non-null, this pass will export type identifiers to
+///   the given summary.
+/// - Otherwise, if ImportSummary is non-null, this pass will import type
+///   identifiers from the given summary.
+/// - Otherwise it does neither.
+/// It is invalid for both ExportSummary and ImportSummary to be non-null.
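+///
+/// For example (sketch): run the pass with neither importing nor exporting
+/// summary information.
+/// \code
+///   PM.add(createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
+///                                   /*ImportSummary=*/nullptr));
+/// \endcode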
+ModulePass *createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
+                                     const ModuleSummaryIndex *ImportSummary);
+
+/// \brief This pass exports CFI checks for use by external modules.
+ModulePass *createCrossDSOCFIPass();
+
+/// \brief This pass implements whole-program devirtualization using type
+/// metadata.
+///
+/// The behavior depends on the summary arguments:
+/// - If ExportSummary is non-null, this pass will export type identifiers to
+///   the given summary.
+/// - Otherwise, if ImportSummary is non-null, this pass will import type
+///   identifiers from the given summary.
+/// - Otherwise it does neither.
+/// It is invalid for both ExportSummary and ImportSummary to be non-null.
+ModulePass *
+createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
+                             const ModuleSummaryIndex *ImportSummary);
+
+/// This pass splits globals into pieces for the benefit of whole-program
+/// devirtualization and control-flow integrity.
+ModulePass *createGlobalSplitPass();
+
+//===----------------------------------------------------------------------===//
+// SampleProfilePass - Loads sample profile data from disk and generates
+// IR metadata to reflect the profile.
+ModulePass *createSampleProfileLoaderPass();
+ModulePass *createSampleProfileLoaderPass(StringRef Name);
+
+/// Write ThinLTO-ready bitcode to Str.
+ModulePass *createWriteThinLTOBitcodePass(raw_ostream &Str,
+                                          raw_ostream *ThinLinkOS = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
new file mode 100644
index 0000000..b52c0fd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/AlwaysInliner.h
@@ -0,0 +1,46 @@
+//===-- AlwaysInliner.h - Pass to inline "always_inline" functions --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Provides passes to inline "always_inline" functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+#define LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Inlines functions marked as "always_inline".
+///
+/// Note that this does not inline call sites marked as always_inline and does
+/// not delete the functions even when all users are inlined. The normal
+/// inliner should be used to handle call site inlining; this pass's goal is to
+/// be the simplest possible pass to remove always_inline function definitions'
+/// uses by inlining them. The \c GlobalDCE pass can be used to remove these
+/// functions once all users are gone.
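+///
+/// A minimal usage sketch (assumes a Module \c M and a configured
+/// ModuleAnalysisManager \c MAM):
+/// \code
+///   ModulePassManager MPM;
+///   MPM.addPass(AlwaysInlinerPass(/*InsertLifetime=*/true));
+///   PreservedAnalyses PA = MPM.run(M, MAM);
+/// \endcode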
+class AlwaysInlinerPass : public PassInfoMixin<AlwaysInlinerPass> {
+  bool InsertLifetime;
+
+public:
+  AlwaysInlinerPass(bool InsertLifetime = true)
+      : InsertLifetime(InsertLifetime) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+/// Create a legacy pass manager instance of a pass to inline and remove
+/// functions marked as "always_inline".
+Pass *createAlwaysInlinerLegacyPass(bool InsertLifetime = true);
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
new file mode 100644
index 0000000..49ca6cc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ArgumentPromotion.h
@@ -0,0 +1,36 @@
+//===- ArgumentPromotion.h - Promote by-reference arguments -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
+#define LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Argument promotion pass.
+///
+/// This pass walks the functions in each SCC and for each one tries to
+/// transform it and all of its callers to replace indirect arguments with
+/// direct (by-value) arguments.
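+///
+/// Usage sketch as a CGSCC pass in the new pass manager (assumes a Module
+/// \c M and a configured ModuleAnalysisManager \c MAM):
+/// \code
+///   ModulePassManager MPM;
+///   MPM.addPass(
+///       createModuleToPostOrderCGSCCPassAdaptor(ArgumentPromotionPass()));
+///   MPM.run(M, MAM);
+/// \endcode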
+class ArgumentPromotionPass : public PassInfoMixin<ArgumentPromotionPass> {
+  unsigned MaxElements;
+
+public:
+  ArgumentPromotionPass(unsigned MaxElements = 3u) : MaxElements(MaxElements) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
new file mode 100644
index 0000000..352bdc7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CalledValuePropagation.h
@@ -0,0 +1,35 @@
+//===- CalledValuePropagation.h - Propagate called values -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a transformation that attaches !callees metadata to
+// indirect call sites. For a given call site, the metadata, if present,
+// indicates the set of functions the call site could possibly target at
+// run-time. This metadata is added to indirect call sites when the set of
+// possible targets can be determined by analysis and is known to be small. The
+// analysis driving the transformation is similar to constant propagation and
+// makes uses of the generic sparse propagation solver.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+#define LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class CalledValuePropagationPass
+    : public PassInfoMixin<CalledValuePropagationPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h b/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h
new file mode 100644
index 0000000..e04d3ae
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ConstantMerge.h
@@ -0,0 +1,37 @@
+//===- ConstantMerge.h - Merge duplicate global constants -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to a pass that merges duplicate global
+// constants together into a single constant that is shared.  This is useful
+// because some passes (e.g. TraceValues) insert a lot of string constants into
+// the program, regardless of whether or not an existing string is available.
+//
+// Algorithm: ConstantMerge is designed to build up a map of available constants
+// and eliminate duplicates when it is initialized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
+#define LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// A pass that merges duplicate global constants into a single constant.
+class ConstantMergePass : public PassInfoMixin<ConstantMergePass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
new file mode 100644
index 0000000..0979f5b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/CrossDSOCFI.h
@@ -0,0 +1,28 @@
+//===-- CrossDSOCFI.h - Externalize this module's CFI checks ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass exports all llvm.bitsets found in the module in the form of a
+// __cfi_check function, which can be used to verify cross-DSO call targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+#define LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class CrossDSOCFIPass : public PassInfoMixin<CrossDSOCFIPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+}
+#endif // LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
new file mode 100644
index 0000000..ba5666f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/DeadArgumentElimination.h
@@ -0,0 +1,144 @@
+//===- DeadArgumentElimination.h - Eliminate Dead Args ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass deletes dead arguments from internal functions.  Dead argument
+// elimination removes arguments which are directly dead, as well as arguments
+// only passed into function calls as dead arguments of other functions.  This
+// pass also deletes dead return values in a similar way.
+//
+// This pass is often useful as a cleanup pass to run after aggressive
+// interprocedural passes, which add possibly-dead arguments or return values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
+#define LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+
+namespace llvm {
+
+class Module;
+class Use;
+class Value;
+
+/// Eliminate dead arguments (and return values) from functions.
+class DeadArgumentEliminationPass
+    : public PassInfoMixin<DeadArgumentEliminationPass> {
+public:
+  /// Struct that represents (part of) either a return value or a function
+  /// argument.  Used so that arguments and return values can be used
+  /// interchangeably.
+  struct RetOrArg {
+    const Function *F;
+    unsigned Idx;
+    bool IsArg;
+
+    RetOrArg(const Function *F, unsigned Idx, bool IsArg)
+        : F(F), Idx(Idx), IsArg(IsArg) {}
+
+    /// Make RetOrArg comparable, so we can put it into a map.
+    bool operator<(const RetOrArg &O) const {
+      return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
+    }
+
+    /// Make RetOrArg comparable, so we can easily iterate the multimap.
+    bool operator==(const RetOrArg &O) const {
+      return F == O.F && Idx == O.Idx && IsArg == O.IsArg;
+    }
+
+    std::string getDescription() const {
+      return (Twine(IsArg ? "Argument #" : "Return value #") + Twine(Idx) +
+              " of function " + F->getName())
+          .str();
+    }
+  };
+
+  /// Liveness enum - During our initial pass over the program, we determine
+  /// that things are either alive or maybe alive. We don't mark anything
+  /// explicitly dead (even if we know it is), since anything not alive
+  /// with no registered uses (in Uses) will never be marked alive and will
+  /// thus become dead in the end.
+  enum Liveness { Live, MaybeLive };
+
+  DeadArgumentEliminationPass(bool ShouldHackArguments_ = false)
+      : ShouldHackArguments(ShouldHackArguments_) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+
+  /// Convenience wrapper
+  RetOrArg CreateRet(const Function *F, unsigned Idx) {
+    return RetOrArg(F, Idx, false);
+  }
+
+  /// Convenience wrapper
+  RetOrArg CreateArg(const Function *F, unsigned Idx) {
+    return RetOrArg(F, Idx, true);
+  }
+
+  using UseMap = std::multimap<RetOrArg, RetOrArg>;
+
+  /// This maps a return value or argument to any MaybeLive return values or
+  /// arguments it uses. This allows the MaybeLive values to be marked live
+  /// when any of its users is marked live.
+  /// For example (indices are left out for clarity):
+  ///  - Uses[ret F] = ret G
+  ///    This means that F calls G, and F returns the value returned by G.
+  ///  - Uses[arg F] = ret G
+  ///    This means that some function calls G and passes its result as an
+  ///    argument to F.
+  ///  - Uses[ret F] = arg F
+  ///    This means that F returns one of its own arguments.
+  ///  - Uses[arg F] = arg G
+  ///    This means that G calls F and passes one of its own (G's) arguments
+  ///    directly to F.
+  UseMap Uses;
+
+  using LiveSet = std::set<RetOrArg>;
+  using LiveFuncSet = std::set<const Function *>;
+
+  /// This set contains all values that have been determined to be live.
+  LiveSet LiveValues;
+
+  /// This set contains all values that cannot be changed in any way.
+  LiveFuncSet LiveFunctions;
+
+  using UseVector = SmallVector<RetOrArg, 5>;
+
+  /// This allows this pass to do double-duty as the dead arg hacking pass
+  /// (used only by bugpoint).
+  bool ShouldHackArguments = false;
+
+private:
+  Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
+  Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses,
+                     unsigned RetValNum = -1U);
+  Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
+
+  void SurveyFunction(const Function &F);
+  void MarkValue(const RetOrArg &RA, Liveness L,
+                 const UseVector &MaybeLiveUses);
+  void MarkLive(const RetOrArg &RA);
+  void MarkLive(const Function &F);
+  void PropagateLiveness(const RetOrArg &RA);
+  bool RemoveDeadStuffFromFunction(Function *F);
+  bool DeleteDeadVarargs(Function &Fn);
+  bool RemoveDeadArgumentsFromCallers(Function &Fn);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h b/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h
new file mode 100644
index 0000000..94cb954
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ElimAvailExtern.h
@@ -0,0 +1,33 @@
+//===- ElimAvailExtern.h - Optimize Global Variables ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transform is designed to eliminate available external global
+// definitions from the program, turning them into declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
+#define LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// A pass that transforms external global definitions into declarations.
+class EliminateAvailableExternallyPass
+    : public PassInfoMixin<EliminateAvailableExternallyPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
new file mode 100644
index 0000000..ff8a654
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
@@ -0,0 +1,33 @@
+//===-- ForceFunctionAttrs.h - Force function attrs for debugging ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Super simple passes to force specific function attrs from the command line
+/// into the IR for debugging purposes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass which forces specific function attributes into the IR, primarily as
+/// a debugging tool.
+struct ForceFunctionAttrsPass : PassInfoMixin<ForceFunctionAttrsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+/// Create a legacy pass manager instance of a pass to force function attrs.
+Pass *createForceFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h
new file mode 100644
index 0000000..dc9f18c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionAttrs.h
@@ -0,0 +1,77 @@
+//===- FunctionAttrs.h - Compute function attributes ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Provides passes for computing function attributes based on interprocedural
+/// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class AAResults;
+class Function;
+class Module;
+class Pass;
+
+/// The three kinds of memory access relevant to 'readonly' and
+/// 'readnone' attributes.
+enum MemoryAccessKind {
+  MAK_ReadNone = 0,
+  MAK_ReadOnly = 1,
+  MAK_MayWrite = 2
+};
+
+/// Returns the memory access properties of this copy of the function.
+MemoryAccessKind computeFunctionBodyMemoryAccess(Function &F, AAResults &AAR);
+
+/// Computes function attributes in post-order over the call graph.
+///
+/// By operating in post-order, this pass computes precise attributes for
+/// called functions prior to processing their callers. This "bottom-up"
+/// approach allows powerful interprocedural inference of function attributes
+/// like memory access patterns, etc. It can discover functions that do not
+/// access memory, or only read memory, and give them the readnone/readonly
+/// attribute. It also discovers function arguments that are not captured by
+/// the function and marks them with the nocapture attribute.
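+///
+/// Usage sketch (assumes a Module \c M and a configured ModuleAnalysisManager
+/// \c MAM):
+/// \code
+///   ModulePassManager MPM;
+///   MPM.addPass(
+///       createModuleToPostOrderCGSCCPassAdaptor(PostOrderFunctionAttrsPass()));
+///   MPM.run(M, MAM);
+/// \endcode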
+struct PostOrderFunctionAttrsPass : PassInfoMixin<PostOrderFunctionAttrsPass> {
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+};
+
+/// Create a legacy pass manager instance of a pass to compute function attrs
+/// in post-order.
+Pass *createPostOrderFunctionAttrsLegacyPass();
+
+/// A pass to do RPO deduction and propagation of function attributes.
+///
+/// This pass provides a general RPO or "top down" propagation of
+/// function attributes. For a few (rare) cases, we can deduce significantly
+/// more about function attributes by working in RPO, so this pass
+/// provides the complement to the post-order pass above where the majority of
+/// deduction is performed.
+// FIXME: Currently there is no RPO CGSCC pass structure to slide into and so
+// this is a boring module pass, but eventually it should be an RPO CGSCC pass
+// when such infrastructure is available.
+class ReversePostOrderFunctionAttrsPass
+    : public PassInfoMixin<ReversePostOrderFunctionAttrsPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
new file mode 100644
index 0000000..5fedde1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/FunctionImport.h
@@ -0,0 +1,162 @@
+//===- llvm/Transforms/IPO/FunctionImport.h - ThinLTO importing -*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
+#define LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Error.h"
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <unordered_set>
+#include <utility>
+
+namespace llvm {
+
+class Module;
+
+/// The function importer automatically imports functions from other modules
+/// based on the provided summary information.
+class FunctionImporter {
+public:
+  /// Set of functions to import from a source module. Each entry is a map
+  /// containing all the functions to import for a source module.
+  /// The key is the GUID identifying a function to import, and the value
+  /// is the threshold applied when deciding to import it.
+  using FunctionsToImportTy = std::map<GlobalValue::GUID, unsigned>;
+
+  /// The map contains an entry for every module to import from, the key being
+  /// the module identifier to pass to the ModuleLoader. The value is the set of
+  /// functions to import.
+  using ImportMapTy = StringMap<FunctionsToImportTy>;
+
+  /// The set contains an entry for every global value the module exports.
+  using ExportSetTy = std::unordered_set<GlobalValue::GUID>;
+
+  /// A function of this type is used to load modules referenced by the index.
+  using ModuleLoaderTy =
+      std::function<Expected<std::unique_ptr<Module>>(StringRef Identifier)>;
+
+  /// Create a Function Importer.
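+  ///
+  /// For example (sketch; \c Index, \c loadModule, \c M and \c ImportList are
+  /// assumed to exist):
+  /// \code
+  ///   FunctionImporter Importer(
+  ///       Index, [&](StringRef Id) { return loadModule(Id); });
+  ///   Expected<bool> Changed = Importer.importFunctions(M, ImportList);
+  /// \endcode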
+  FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader)
+      : Index(Index), ModuleLoader(std::move(ModuleLoader)) {}
+
+  /// Import functions in Module \p M based on the supplied import list.
+  Expected<bool> importFunctions(Module &M, const ImportMapTy &ImportList);
+
+private:
+  /// The summaries index used to trigger importing.
+  const ModuleSummaryIndex &Index;
+
+  /// Factory function to load a Module for a given identifier
+  ModuleLoaderTy ModuleLoader;
+};
+
+/// The function importing pass
+class FunctionImportPass : public PassInfoMixin<FunctionImportPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Compute all the imports and exports for every module in the Index.
+///
+/// \p ModuleToDefinedGVSummaries contains for each Module a map
+/// (GUID -> Summary) for every global defined in the module.
+///
+/// \p ImportLists will be populated with an entry for every Module we are
+/// importing into. This entry is itself a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+///
+/// \p ExportLists contains for each Module the set of globals (GUID) that will
+/// be imported by another module, or referenced by such a function. I.e. this
+/// is the set of globals that need to be promoted/renamed appropriately.
+void ComputeCrossModuleImport(
+    const ModuleSummaryIndex &Index,
+    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+    StringMap<FunctionImporter::ImportMapTy> &ImportLists,
+    StringMap<FunctionImporter::ExportSetTy> &ExportLists);
+
+/// Compute all the imports for the given module using the Index.
+///
+/// \p ImportList will be populated with a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+void ComputeCrossModuleImportForModule(
+    StringRef ModulePath, const ModuleSummaryIndex &Index,
+    FunctionImporter::ImportMapTy &ImportList);
+
+/// Mark all external summaries in \p Index for import into the given module.
+/// Used for distributed builds using a distributed index.
+///
+/// \p ImportList will be populated with a map that can be passed to
+/// FunctionImporter::importFunctions() above (see description there).
+void ComputeCrossModuleImportForModuleFromIndex(
+    StringRef ModulePath, const ModuleSummaryIndex &Index,
+    FunctionImporter::ImportMapTy &ImportList);
+
+/// PrevailingType enum used as a return type of the callback passed to
+/// computeDeadSymbols. The Yes and No values are used when the status is
+/// explicitly set by symbol resolution; otherwise the status is Unknown.
+enum class PrevailingType { Yes, No, Unknown };
+
+/// Compute all the symbols that are "dead": i.e. those that can't be reached
+/// in the graph from any of the given symbols listed in
+/// \p GUIDPreservedSymbols. Non-prevailing symbols are symbols without a
+/// prevailing copy anywhere in the IR and are normally dead; the
+/// \p isPrevailing predicate returns the status of a symbol.
+void computeDeadSymbols(
+    ModuleSummaryIndex &Index,
+    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
+    function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing);
+
+/// Converts value \p GV to a declaration, or replaces it with a declaration if
+/// it is an alias. Returns true if converted, false if replaced.
+bool convertToDeclaration(GlobalValue &GV);
+
+/// Compute the set of summaries needed for a ThinLTO backend compilation of
+/// \p ModulePath.
+///
+/// This includes summaries from that module (in case any global summary based
+/// optimizations were recorded) and from any definitions in other modules that
+/// should be imported.
+///
+/// \p ModuleToSummariesForIndex will be populated with the needed summaries
+/// from each required module path. Use a std::map instead of StringMap to get
+/// stable order for bitcode emission.
+void gatherImportedSummariesForModule(
+    StringRef ModulePath,
+    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+    const FunctionImporter::ImportMapTy &ImportList,
+    std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);
+
+/// Emit into \p OutputFilename the files module \p ModulePath will import from.
+std::error_code
+EmitImportsFiles(StringRef ModulePath, StringRef OutputFilename,
+                 const FunctionImporter::ImportMapTy &ModuleImports);
+
+/// Resolve WeakForLinker values in \p TheModule based on the information
+/// recorded in the summaries during global summary-based analysis.
+void thinLTOResolveWeakForLinkerModule(Module &TheModule,
+                                       const GVSummaryMapTy &DefinedGlobals);
+
+/// Internalize \p TheModule based on the information recorded in the summaries
+/// during global summary-based analysis.
+void thinLTOInternalizeModule(Module &TheModule,
+                              const GVSummaryMapTy &DefinedGlobals);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
new file mode 100644
index 0000000..7ca241f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalDCE.h
@@ -0,0 +1,57 @@
+//===-- GlobalDCE.h - DCE unreachable internal functions ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transform is designed to eliminate unreachable internal globals from the
+// program.  It uses an aggressive algorithm, searching out globals that are
+// known to be alive.  After it finds all of the globals which are needed, it
+// deletes whatever is left over.  This allows it to delete recursive chunks of
+// the program which are unreachable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALDCE_H
+#define LLVM_TRANSFORMS_IPO_GLOBALDCE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <unordered_map>
+
+namespace llvm {
+
+/// Pass to remove unreachable internal globals (functions and variables).
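+///
+/// A minimal usage sketch (assumes a Module \c M and a configured
+/// ModuleAnalysisManager \c MAM):
+/// \code
+///   ModulePassManager MPM;
+///   MPM.addPass(GlobalDCEPass());
+///   MPM.run(M, MAM);
+/// \endcode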
+class GlobalDCEPass : public PassInfoMixin<GlobalDCEPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+
+private:
+  SmallPtrSet<GlobalValue*, 32> AliveGlobals;
+
+  /// Global -> Global that uses this global.
+  DenseMap<GlobalValue *, SmallPtrSet<GlobalValue *, 4>> GVDependencies;
+
+  /// Constant -> Globals that use this global cache.
+  std::unordered_map<Constant *, SmallPtrSet<GlobalValue *, 8>>
+      ConstantDependenciesCache;
+
+  /// Comdat -> Globals in that Comdat section.
+  std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers;
+
+  void UpdateGVDependencies(GlobalValue &GV);
+  void MarkLive(GlobalValue &GV,
+                SmallVectorImpl<GlobalValue *> *Updates = nullptr);
+  bool RemoveUnusedGlobalValue(GlobalValue &GV);
+
+  void ComputeDependencies(Value *V, SmallPtrSetImpl<GlobalValue *> &U);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALDCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h
new file mode 100644
index 0000000..5b48786
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalOpt.h
@@ -0,0 +1,33 @@
+//===- GlobalOpt.h - Optimize Global Variables ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass transforms simple global variables that never have their address
+// taken.  Where this is obviously true, it marks read/write globals as
+// constant, deletes variables only stored to, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALOPT_H
+#define LLVM_TRANSFORMS_IPO_GLOBALOPT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Optimize globals that never have their address taken.
+class GlobalOptPass : public PassInfoMixin<GlobalOptPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALOPT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h
new file mode 100644
index 0000000..56cefb7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/GlobalSplit.h
@@ -0,0 +1,34 @@
+//===- GlobalSplit.h - global variable splitter -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass uses inrange annotations on GEP indices to split globals where
+// beneficial. Clang currently attaches these annotations to references to
+// virtual table globals under the Itanium ABI for the benefit of the
+// whole-program virtual call optimization and control flow integrity passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+#define LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to perform split of global variables.
+class GlobalSplitPass : public PassInfoMixin<GlobalSplitPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h b/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h
new file mode 100644
index 0000000..54e1c24
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/InferFunctionAttrs.h
@@ -0,0 +1,36 @@
+//===-- InferFunctionAttrs.h - Infer implicit function attributes ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Interfaces for passes which infer implicit function attributes from the
+/// name and signature of function declarations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass which infers function attributes from the names and signatures of
+/// function declarations in a module.
+struct InferFunctionAttrsPass : PassInfoMixin<InferFunctionAttrsPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Create a legacy pass manager instance of a pass to infer function
+/// attributes.
+Pass *createInferFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
new file mode 100644
index 0000000..eda8cf4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Inliner.h
@@ -0,0 +1,109 @@
+//===- Inliner.h - Inliner pass and infrastructure --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INLINER_H
+#define LLVM_TRANSFORMS_IPO_INLINER_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
+#include <utility>
+
+namespace llvm {
+
+class AssumptionCacheTracker;
+class CallGraph;
+class ProfileSummaryInfo;
+
+/// This class contains all of the helper code which is used to perform the
+/// inlining operations that do not depend on the policy. It contains the core
+/// bottom-up inlining infrastructure that specific inliner passes use.
+struct LegacyInlinerBase : public CallGraphSCCPass {
+  explicit LegacyInlinerBase(char &ID);
+  explicit LegacyInlinerBase(char &ID, bool InsertLifetime);
+
+  /// For this class, we declare that we require and preserve the call graph.
+  /// If the derived class implements this method, it should always explicitly
+  /// call the implementation here.
+  void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+  bool doInitialization(CallGraph &CG) override;
+
+  /// Main run interface method, this implements the interface required by the
+  /// Pass class.
+  bool runOnSCC(CallGraphSCC &SCC) override;
+
+  using llvm::Pass::doFinalization;
+
+  /// Remove now-dead linkonce functions at the end of processing to avoid
+  /// breaking the SCC traversal.
+  bool doFinalization(CallGraph &CG) override;
+
+  /// This method must be implemented by the subclass to determine the cost of
+  /// inlining the specified call site.  If the cost returned is greater than
+  /// the current inline threshold, the call site is not inlined.
+  virtual InlineCost getInlineCost(CallSite CS) = 0;
+
+  /// Remove dead functions.
+  ///
+  /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+  /// which restricts it to deleting functions with an 'AlwaysInline'
+  /// attribute. This is useful for the InlineAlways pass that only wants to
+  /// deal with that subset of the functions.
+  bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
+
+  /// This function performs the main work of the pass.  The default of
+  /// Inliner::runOnSCC() calls skipSCC() before calling this method, but
+  /// derived classes which cannot be skipped can override that method and call
+  /// this function unconditionally.
+  bool inlineCalls(CallGraphSCC &SCC);
+
+private:
+  // Insert @llvm.lifetime intrinsics.
+  bool InsertLifetime = true;
+
+protected:
+  AssumptionCacheTracker *ACT;
+  ProfileSummaryInfo *PSI;
+  ImportedFunctionsInliningStatistics ImportedFunctionsStats;
+};
+
+/// The inliner pass for the new pass manager.
+///
+/// This pass wires together the inlining utilities and the inline cost
+/// analysis into a CGSCC pass. It considers every call in every function in
+/// the SCC and tries to inline if profitable. It can be tuned with a number of
+/// parameters to control what cost model is used and what tradeoffs are made
+/// when making the decision.
+///
+/// It should be noted that the legacy inliners do considerably more than this
+/// inliner pass does. They provide logic for manually merging allocas, and
+/// doing considerable DCE including the DCE of dead functions. This pass makes
+/// every attempt to be simpler. DCE of functions requires complex reasoning
+/// about comdat groups, etc. Instead, it is expected that other more focused
+/// passes be composed to achieve the same end result.
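+///
+/// Usage sketch in the new pass manager (assumes a Module \c M and a
+/// configured ModuleAnalysisManager \c MAM):
+/// \code
+///   ModulePassManager MPM;
+///   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
+///       InlinerPass(getInlineParams(/*Threshold=*/225))));
+///   MPM.run(M, MAM);
+/// \endcode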
+class InlinerPass : public PassInfoMixin<InlinerPass> {
+public:
+  InlinerPass(InlineParams Params = getInlineParams())
+      : Params(std::move(Params)) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+
+private:
+  InlineParams Params;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_INLINER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h b/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h
new file mode 100644
index 0000000..45d676d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/Internalize.h
@@ -0,0 +1,79 @@
+//===-- Internalize.h - Internalization API ---------------------*- C++ -*-===//
+//
+//                      The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions and variables in the input module.
+// If the function or variable does not need to be preserved according to the
+// client supplied callback, it is marked as internal.
+//
+// This transformation would not be legal in a regular compilation, but it gets
+// extra information from the linker about what is safe.
+//
+// For example: internalizing a function with external linkage is safe only if
+// we are told that it is only used from within this module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INTERNALIZE_H
+#define LLVM_TRANSFORMS_IPO_INTERNALIZE_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/PassManager.h"
+#include <functional>
+#include <set>
+
+namespace llvm {
+class Module;
+class CallGraph;
+
+/// A pass that internalizes all functions and variables other than those that
+/// must be preserved according to \c MustPreserveGV.
+class InternalizePass : public PassInfoMixin<InternalizePass> {
+  /// Client supplied callback to control whether a symbol must be preserved.
+  const std::function<bool(const GlobalValue &)> MustPreserveGV;
+  /// Set of symbols private to the compiler that this pass should not touch.
+  StringSet<> AlwaysPreserved;
+
+  /// Return false if we're allowed to internalize this GV.
+  bool shouldPreserveGV(const GlobalValue &GV);
+  /// Internalize GV if it is possible to do so, i.e. it is not externally
+  /// visible and is not a member of an externally visible comdat.
+  bool maybeInternalize(GlobalValue &GV,
+                        const std::set<const Comdat *> &ExternalComdats);
+  /// If GV is part of a comdat and is externally visible, keep track of its
+  /// comdat so that we don't internalize any of its members.
+  void checkComdatVisibility(GlobalValue &GV,
+                             std::set<const Comdat *> &ExternalComdats);
+
+public:
+  InternalizePass();
+  InternalizePass(std::function<bool(const GlobalValue &)> MustPreserveGV)
+      : MustPreserveGV(std::move(MustPreserveGV)) {}
+
+  /// Run the internalizer on \p TheModule, returns true if any changes were
+  /// made.
+  ///
+  /// If the CallGraph \p CG is supplied, it will be updated when
+  /// internalizing a function (by removing any edge from the "external node")
+  bool internalizeModule(Module &TheModule, CallGraph *CG = nullptr);
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// Helper function to internalize functions and variables in a Module.
+inline bool
+internalizeModule(Module &TheModule,
+                  std::function<bool(const GlobalValue &)> MustPreserveGV,
+                  CallGraph *CG = nullptr) {
+  return InternalizePass(std::move(MustPreserveGV))
+      .internalizeModule(TheModule, CG);
+}
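+
+// For example (sketch; M is an existing Module): preserve "main" and
+// internalize everything else:
+//
+//   StringSet<> Preserved;
+//   Preserved.insert("main");
+//   internalizeModule(M, [&](const GlobalValue &GV) {
+//     return Preserved.count(GV.getName()) > 0;
+//   });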
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_INTERNALIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
new file mode 100644
index 0000000..3bcfe65
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/LowerTypeTests.h
@@ -0,0 +1,205 @@
+//===- LowerTypeTests.h - type metadata lowering pass -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the type test lowering pass implementation that
+// may be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
+#define LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class Module;
+class raw_ostream;
+
+namespace lowertypetests {
+
+struct BitSetInfo {
+  // The indices of the set bits in the bitset.
+  std::set<uint64_t> Bits;
+
+  // The byte offset into the combined global represented by the bitset.
+  uint64_t ByteOffset;
+
+  // The size of the bitset in bits.
+  uint64_t BitSize;
+
+  // Log2 alignment of the bit set relative to the combined global.
+  // For example, a log2 alignment of 3 means that bits in the bitset
+  // represent addresses 8 bytes apart.
+  unsigned AlignLog2;
+
+  bool isSingleOffset() const {
+    return Bits.size() == 1;
+  }
+
+  bool isAllOnes() const {
+    return Bits.size() == BitSize;
+  }
+
+  bool containsGlobalOffset(uint64_t Offset) const;
+
+  void print(raw_ostream &OS) const;
+};
+
+struct BitSetBuilder {
+  SmallVector<uint64_t, 16> Offsets;
+  uint64_t Min = std::numeric_limits<uint64_t>::max();
+  uint64_t Max = 0;
+
+  BitSetBuilder() = default;
+
+  void addOffset(uint64_t Offset) {
+    if (Min > Offset)
+      Min = Offset;
+    if (Max < Offset)
+      Max = Offset;
+
+    Offsets.push_back(Offset);
+  }
+
+  BitSetInfo build();
+};
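+
+// Usage sketch (illustrative): collect the byte offset of each member of a
+// bit set, then build the compact representation.
+//
+//   BitSetBuilder BSB;
+//   for (uint64_t Off : {0u, 8u, 16u}) // hypothetical member offsets
+//     BSB.addOffset(Off);
+//   BitSetInfo BSI = BSB.build();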
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual tables
+/// and the bit sets represent their derived classes: in a single inheritance
+/// hierarchy, the optimum layout would involve a depth-first search of the
+/// class hierarchy (and in fact the computed layout ends up looking a lot like
+/// a DFS), but a naive DFS would not work well in the presence of multiple
+/// inheritance. This aspect of the algorithm ends up fitting smaller
+/// hierarchies inside larger ones where that would be beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+/// A       B
+///   \   / | \
+///     C   D   E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
+/// cover only the 4 objects in its hierarchy, but not for bsA as it needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set passing the object
+/// indices of its referenced globals. It then assembles a layout from the
+/// computed layout in the Fragments field.
+struct GlobalLayoutBuilder {
+  /// The computed layout. Each element of this vector contains a fragment of
+  /// layout (which may be empty) consisting of object indices.
+  std::vector<std::vector<uint64_t>> Fragments;
+
+  /// Mapping from object index to fragment index.
+  std::vector<uint64_t> FragmentMap;
+
+  GlobalLayoutBuilder(uint64_t NumObjects)
+      : Fragments(1), FragmentMap(NumObjects) {}
+
+  /// Add F to the layout while trying to keep its indices contiguous.
+  /// If a previously seen fragment uses any of F's indices, that
+  /// fragment will be laid out inside F.
+  void addFragment(const std::set<uint64_t> &F);
+};
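+
+// Usage sketch (illustrative), mirroring the example above with object
+// indices A=0, B=1, C=2, D=3, E=4:
+//
+//   GlobalLayoutBuilder GLB(5);
+//   GLB.addFragment({2});          // bsC
+//   GLB.addFragment({3});          // bsD
+//   GLB.addFragment({4});          // bsE
+//   GLB.addFragment({0, 2});       // bsA
+//   GLB.addFragment({1, 2, 3, 4}); // bsB
+//
+// Afterwards, Fragments holds the computed layout fragments.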
+
+/// This class is used to build a byte array containing overlapping bit sets. By
+/// loading from indexed offsets into the byte array and applying a mask, a
+/// program can test bits from the bit set with a relatively short instruction
+/// sequence. For example, suppose we have 15 bit sets to lay out:
+///
+/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
+/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
+/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
+///
+/// These bits can be laid out in a 16-byte array like this:
+///
+///       Byte Offset
+///     0123456789ABCDEF
+/// Bit
+///   7 HHHHHHHHHIIIIIII
+///   6 GGGGGGGGGGJJJJJJ
+///   5 FFFFFFFFFFFKKKKK
+///   4 EEEEEEEEEEEELLLL
+///   3 DDDDDDDDDDDDDMMM
+///   2 CCCCCCCCCCCCCCNN
+///   1 BBBBBBBBBBBBBBBO
+///   0 AAAAAAAAAAAAAAAA
+///
+/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
+/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
+/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
+///
+/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
+/// because for one thing it gives us better packing (the more bins there are,
+/// the less evenly they will be filled), and for another, the instruction
+/// sequences can be slightly shorter, both on x86 and ARM.
+struct ByteArrayBuilder {
+  /// The byte array built so far.
+  std::vector<uint8_t> Bytes;
+
+  enum { BitsPerByte = 8 };
+
+  /// The number of bytes allocated so far for each of the bits.
+  uint64_t BitAllocs[BitsPerByte];
+
+  ByteArrayBuilder() {
+    memset(BitAllocs, 0, sizeof(BitAllocs));
+  }
+
+  /// Allocate BitSize bits in the byte array where Bits contains the bits to
+  /// set. AllocByteOffset is set to the offset within the byte array and
+  /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
+  /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
+  /// efficiently; the pass allocates bit sets in decreasing size order.
+  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
+                uint64_t &AllocByteOffset, uint8_t &AllocMask);
+};
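+
+// Usage sketch (illustrative): allocate a 16-bit set whose bits 0 and 3 are
+// set; the builder reports the byte offset and the mask to test with.
+//
+//   ByteArrayBuilder BAB;
+//   uint64_t ByteOff;
+//   uint8_t Mask;
+//   BAB.allocate({0, 3}, 16, ByteOff, Mask);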
+
+} // end namespace lowertypetests
+
+class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h b/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h
new file mode 100644
index 0000000..ec6dd36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/PartialInlining.h
@@ -0,0 +1,32 @@
+//===- PartialInlining.h - Inline parts of functions ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs partial inlining, typically by inlining an if statement
+// that surrounds the body of the function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
+#define LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to perform partial inlining.
+class PartialInlinerPass : public PassInfoMixin<PartialInlinerPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
new file mode 100644
index 0000000..276306f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -0,0 +1,217 @@
+// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PassManagerBuilder class, which is used to set up a
+// "standard" optimization sequence suitable for languages like C and C++.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+#define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+class ModuleSummaryIndex;
+class Pass;
+class TargetLibraryInfoImpl;
+class TargetMachine;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class FunctionPassManager;
+class PassManagerBase;
+}
+
+/// PassManagerBuilder - This class is used to set up a standard optimization
+/// sequence for languages like C and C++, allowing some APIs to customize the
+/// pass sequence in various ways. A simple example of using it would be:
+///
+///  PassManagerBuilder Builder;
+///  Builder.OptLevel = 2;
+///  Builder.populateFunctionPassManager(FPM);
+///  Builder.populateModulePassManager(MPM);
+///
+/// In addition to setting up the basic passes, PassManagerBuilder allows
+/// frontends to vend a plugin API, where plugins are allowed to add extensions
+/// to the default pass manager.  They do this by specifying where in the pass
+/// pipeline they want to be added, along with a callback function that adds
+/// the pass(es).  For example, a plugin that wanted to add a loop optimization
+/// could do something like this:
+///
+/// static void addMyLoopPass(const PassManagerBuilder &Builder,
+///                           legacy::PassManagerBase &PM) {
+///   if (Builder.OptLevel > 2 && Builder.SizeLevel == 0)
+///     PM.add(createMyAwesomePass());
+/// }
+///   ...
+///   Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
+///                        addMyLoopPass);
+///   ...
+class PassManagerBuilder {
+public:
+  /// Extensions are passed the builder itself (so they can see how it is
+  /// configured) as well as the pass manager to add stuff to.
+  typedef std::function<void(const PassManagerBuilder &Builder,
+                             legacy::PassManagerBase &PM)>
+      ExtensionFn;
+  enum ExtensionPointTy {
+    /// EP_EarlyAsPossible - This extension point allows adding passes before
+    /// any other transformations, allowing them to see the code as it is coming
+    /// out of the frontend.
+    EP_EarlyAsPossible,
+
+    /// EP_ModuleOptimizerEarly - This extension point allows adding passes
+    /// just before the main module-level optimization passes.
+    EP_ModuleOptimizerEarly,
+
+    /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
+    /// the end of the loop optimizer.
+    EP_LoopOptimizerEnd,
+
+    /// EP_ScalarOptimizerLate - This extension point allows adding optimization
+    /// passes after most of the main optimizations, but before the last
+    /// cleanup-ish optimizations.
+    EP_ScalarOptimizerLate,
+
+    /// EP_OptimizerLast -- This extension point allows adding passes that
+    /// run after everything else.
+    EP_OptimizerLast,
+
+    /// EP_VectorizerStart - This extension point allows adding optimization
+    /// passes before the vectorizer and other highly target specific
+    /// optimization passes are executed.
+    EP_VectorizerStart,
+
+    /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
+    /// should not be disabled by O0 optimization level. The passes will be
+    /// inserted after the inlining pass.
+    EP_EnabledOnOptLevel0,
+
+    /// EP_Peephole - This extension point allows adding passes that perform
+    /// peephole optimizations similar to the instruction combiner. These passes
+    /// will be inserted after each instance of the instruction combiner pass.
+    EP_Peephole,
+
+    /// EP_LateLoopOptimizations - This extension point allows adding late loop
+    /// canonicalization and simplification passes. This is the last point in
+    /// the loop optimization pipeline before loop deletion. Each pass added
+    /// here must be an instance of LoopPass.
+    /// This is the place to add passes that can remove loops, such as target-
+    /// specific loop idiom recognition.
+    EP_LateLoopOptimizations,
+
+    /// EP_CGSCCOptimizerLate - This extension point allows adding CallGraphSCC
+    /// passes at the end of the main CallGraphSCC passes and before any
+    /// function simplification passes run by CGPassManager.
+    EP_CGSCCOptimizerLate,
+  };
+
+  /// The Optimization Level - Specify the basic optimization level.
+  ///    0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
+  unsigned OptLevel;
+
+  /// SizeLevel - How much we're optimizing for size.
+  ///    0 = none, 1 = -Os, 2 = -Oz
+  unsigned SizeLevel;
+
+  /// LibraryInfo - Specifies information about the runtime library for the
+  /// optimizer.  If this is non-null, it is added to both the function and
+  /// per-module pass pipeline.
+  TargetLibraryInfoImpl *LibraryInfo;
+
+  /// Inliner - Specifies the inliner to use.  If this is non-null, it is
+  /// added to the per-module passes.
+  Pass *Inliner;
+
+  /// The module summary index to use for exporting information from the
+  /// regular LTO phase, for example for the CFI and devirtualization type
+  /// tests.
+  ModuleSummaryIndex *ExportSummary = nullptr;
+
+  /// The module summary index to use for importing information to the
+  /// thin LTO backends, for example for the CFI and devirtualization type
+  /// tests.
+  const ModuleSummaryIndex *ImportSummary = nullptr;
+
+  bool DisableTailCalls;
+  bool DisableUnitAtATime;
+  bool DisableUnrollLoops;
+  bool SLPVectorize;
+  bool LoopVectorize;
+  bool RerollLoops;
+  bool NewGVN;
+  bool DisableGVNLoadPRE;
+  bool VerifyInput;
+  bool VerifyOutput;
+  bool MergeFunctions;
+  bool PrepareForLTO;
+  bool PrepareForThinLTO;
+  bool PerformThinLTO;
+  bool DivergentTarget;
+
+  /// Enable profile instrumentation pass.
+  bool EnablePGOInstrGen;
+  /// Profile data file name that the instrumentation will be written to.
+  std::string PGOInstrGen;
+  /// Path of the profile data file.
+  std::string PGOInstrUse;
+  /// Path of the sample Profile data file.
+  std::string PGOSampleUse;
+
+private:
+  /// ExtensionList - The list of all of the extensions that are registered.
+  std::vector<std::pair<ExtensionPointTy, ExtensionFn>> Extensions;
+
+public:
+  PassManagerBuilder();
+  ~PassManagerBuilder();
+  /// Adds an extension that will be used by all PassManagerBuilder instances.
+  /// This is intended to be used by plugins, to register a set of
+  /// optimisations to run automatically.
+  static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+  void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+
+private:
+  void addExtensionsToPM(ExtensionPointTy ETy,
+                         legacy::PassManagerBase &PM) const;
+  void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
+  void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
+  void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
+  void addPGOInstrPasses(legacy::PassManagerBase &MPM);
+  void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
+  void addInstructionCombiningPass(legacy::PassManagerBase &MPM) const;
+
+public:
+  /// populateFunctionPassManager - This fills in the function pass manager,
+  /// which is expected to be run on each function immediately as it is
+  /// generated.  The idea is to reduce the size of the IR in memory.
+  void populateFunctionPassManager(legacy::FunctionPassManager &FPM);
+
+  /// populateModulePassManager - This sets up the primary pass manager.
+  void populateModulePassManager(legacy::PassManagerBase &MPM);
+  void populateLTOPassManager(legacy::PassManagerBase &PM);
+  void populateThinLTOPassManager(legacy::PassManagerBase &PM);
+};
+
+/// Registers a function for adding a standard set of passes.  This should be
+/// used by optimizer plugins to allow all front ends to transparently use
+/// them.  Create a static instance of this class in your plugin, providing a
+/// private function that the PassManagerBuilder can use to add your passes.
+struct RegisterStandardPasses {
+  RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty,
+                         PassManagerBuilder::ExtensionFn Fn) {
+    PassManagerBuilder::addGlobalExtension(Ty, std::move(Fn));
+  }
+};
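+
+// Example plugin registration (illustrative; createMyAwesomePass is
+// hypothetical):
+//
+//   static RegisterStandardPasses
+//       RegisterMyPass(PassManagerBuilder::EP_LoopOptimizerEnd,
+//                      [](const PassManagerBuilder &,
+//                         legacy::PassManagerBase &PM) {
+//                        PM.add(createMyAwesomePass());
+//                      });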
+
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h b/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h
new file mode 100644
index 0000000..fdb7865
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SCCP.h
@@ -0,0 +1,38 @@
+//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements interprocedural sparse conditional constant
+// propagation and merging.
+//
+// Specifically, this:
+//   * Assumes values are constant unless proven otherwise
+//   * Assumes BasicBlocks are dead unless proven otherwise
+//   * Proves values to be constant, and replaces them with constants
+//   * Proves conditional branches to be unconditional
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SCCP_H
+#define LLVM_TRANSFORMS_IPO_SCCP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Pass to perform interprocedural constant propagation.
+class IPSCCPPass : public PassInfoMixin<IPSCCPPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_SCCP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
new file mode 100644
index 0000000..cd5a056
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SampleProfile.h
@@ -0,0 +1,40 @@
+//===- SampleProfile.h - SamplePGO pass -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for the sampled PGO loader pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
+#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
+
+#include "llvm/IR/PassManager.h"
+#include <string>
+
+namespace llvm {
+
+class Module;
+
+/// The sample profiler data loader pass.
+class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
+public:
+  SampleProfileLoaderPass(std::string File = "", bool IsThinLTOPreLink = false)
+      : ProfileFileName(File), IsThinLTOPreLink(IsThinLTOPreLink) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  std::string ProfileFileName;
+  bool IsThinLTOPreLink;
+};
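+
+// Usage sketch (illustrative; "code.prof" is a hypothetical profile file):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(SampleProfileLoaderPass("code.prof"));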
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h b/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h
new file mode 100644
index 0000000..5a05cd7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/StripDeadPrototypes.h
@@ -0,0 +1,32 @@
+//===-- StripDeadPrototypes.h - Remove unused function declarations -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions in the input module, looking for
+// dead declarations, and removes them. Dead declarations are declarations of
+// functions for which no implementation is available (i.e., declarations for
+// unused library functions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+#define LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass to remove unused function declarations.
+struct StripDeadPrototypesPass : PassInfoMixin<StripDeadPrototypesPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
new file mode 100644
index 0000000..0b3ba86
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/SyntheticCountsPropagation.h
@@ -0,0 +1,19 @@
+#ifndef LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
+#define LLVM_TRANSFORMS_IPO_SYNTHETIC_COUNTS_PROPAGATION_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/ScaledNumber.h"
+
+namespace llvm {
+class Function;
+class Module;
+
+class SyntheticCountsPropagation
+    : public PassInfoMixin<SyntheticCountsPropagation> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+};
+} // namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h b/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h
new file mode 100644
index 0000000..bf04bbf
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/ThinLTOBitcodeWriter.h
@@ -0,0 +1,41 @@
+//===- ThinLTOBitcodeWriter.h - Bitcode writing pass for ThinLTO ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass prepares a module containing type metadata for ThinLTO by splitting
+// it into regular and thin LTO parts if possible, and writing both parts to
+// a multi-module bitcode file. Modules that do not contain type metadata are
+// written unmodified as a single module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
+#define LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
+
+#include <llvm/IR/PassManager.h>
+#include <llvm/Support/raw_ostream.h>
+
+namespace llvm {
+
+class ThinLTOBitcodeWriterPass
+    : public PassInfoMixin<ThinLTOBitcodeWriterPass> {
+  raw_ostream &OS;
+  raw_ostream *ThinLinkOS;
+
+public:
+  // Writes bitcode to OS. Also writes the thin link file to ThinLinkOS, if
+  // it is not nullptr.
+  ThinLTOBitcodeWriterPass(raw_ostream &OS, raw_ostream *ThinLinkOS)
+      : OS(OS), ThinLinkOS(ThinLinkOS) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
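+
+// Usage sketch (illustrative; Out and ThinLinkOut are assumed to be open
+// raw_fd_ostreams):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(ThinLTOBitcodeWriterPass(Out, &ThinLinkOut));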
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
new file mode 100644
index 0000000..1aa4c6f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -0,0 +1,226 @@
+//===- WholeProgramDevirt.h - Whole-program devirt pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the whole-program devirtualization pass
+// implementation that may be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
+#define LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+template <typename T> class MutableArrayRef;
+class Function;
+class GlobalVariable;
+
+namespace wholeprogramdevirt {
+
+// A bit vector that keeps track of which bits are used. We use this to
+// pack constant values compactly before and after each virtual table.
+struct AccumBitVector {
+  std::vector<uint8_t> Bytes;
+
+  // Bits in BytesUsed[I] are 1 if matching bit in Bytes[I] is used, 0 if not.
+  std::vector<uint8_t> BytesUsed;
+
+  std::pair<uint8_t *, uint8_t *> getPtrToData(uint64_t Pos, uint8_t Size) {
+    if (Bytes.size() < Pos + Size) {
+      Bytes.resize(Pos + Size);
+      BytesUsed.resize(Pos + Size);
+    }
+    return std::make_pair(Bytes.data() + Pos, BytesUsed.data() + Pos);
+  }
+
+  // Set little-endian value Val with size Size at bit position Pos,
+  // and mark bytes as used.
+  void setLE(uint64_t Pos, uint64_t Val, uint8_t Size) {
+    assert(Pos % 8 == 0);
+    auto DataUsed = getPtrToData(Pos / 8, Size);
+    for (unsigned I = 0; I != Size; ++I) {
+      DataUsed.first[I] = Val >> (I * 8);
+      assert(!DataUsed.second[I]);
+      DataUsed.second[I] = 0xff;
+    }
+  }
+
+  // Set big-endian value Val with size Size at bit position Pos,
+  // and mark bytes as used.
+  void setBE(uint64_t Pos, uint64_t Val, uint8_t Size) {
+    assert(Pos % 8 == 0);
+    auto DataUsed = getPtrToData(Pos / 8, Size);
+    for (unsigned I = 0; I != Size; ++I) {
+      DataUsed.first[Size - I - 1] = Val >> (I * 8);
+      assert(!DataUsed.second[Size - I - 1]);
+      DataUsed.second[Size - I - 1] = 0xff;
+    }
+  }
+
+  // Set bit at bit position Pos to b and mark bit as used.
+  void setBit(uint64_t Pos, bool b) {
+    auto DataUsed = getPtrToData(Pos / 8, 1);
+    if (b)
+      *DataUsed.first |= 1 << (Pos % 8);
+    assert(!(*DataUsed.second & (1 << Pos % 8)));
+    *DataUsed.second |= 1 << (Pos % 8);
+  }
+};
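+
+// Usage sketch (illustrative): record a two-byte little-endian constant at
+// bit position 0 and a flag bit in the following byte.
+//
+//   AccumBitVector V;
+//   V.setLE(0, 0xBEEF, 2); // occupies bytes 0-1
+//   V.setBit(16, true);    // bit 0 of byte 2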
+
+// The bits that will be stored before and after a particular vtable.
+struct VTableBits {
+  // The vtable global.
+  GlobalVariable *GV;
+
+  // Cache of the vtable's size in bytes.
+  uint64_t ObjectSize = 0;
+
+  // The bit vector that will be laid out before the vtable. Note that these
+  // bytes are stored in reverse order until the globals are rebuilt. This means
+  // that any values in the array must be stored using the opposite endianness
+  // from the target.
+  AccumBitVector Before;
+
+  // The bit vector that will be laid out after the vtable.
+  AccumBitVector After;
+};
+
+// Information about a member of a particular type identifier.
+struct TypeMemberInfo {
+  // The VTableBits for the vtable.
+  VTableBits *Bits;
+
+  // The offset in bytes from the start of the vtable (i.e. the address point).
+  uint64_t Offset;
+
+  bool operator<(const TypeMemberInfo &other) const {
+    return Bits < other.Bits || (Bits == other.Bits && Offset < other.Offset);
+  }
+};
+
+// A virtual call target, i.e. an entry in a particular vtable.
+struct VirtualCallTarget {
+  VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM);
+
+  // For testing only.
+  VirtualCallTarget(const TypeMemberInfo *TM, bool IsBigEndian)
+      : Fn(nullptr), TM(TM), IsBigEndian(IsBigEndian), WasDevirt(false) {}
+
+  // The function stored in the vtable.
+  Function *Fn;
+
+  // A pointer to the type identifier member through which the pointer to Fn is
+  // accessed.
+  const TypeMemberInfo *TM;
+
+  // When doing virtual constant propagation, this stores the return value for
+  // the function when passed the currently considered argument list.
+  uint64_t RetVal;
+
+  // Whether the target is big endian.
+  bool IsBigEndian;
+
+  // Whether at least one call site to the target was devirtualized.
+  bool WasDevirt;
+
+  // The minimum byte offset before the address point. This covers the bytes in
+  // the vtable object before the address point (e.g. RTTI, access-to-top,
+  // vtables for other base classes) and is equal to the offset from the start
+  // of the vtable object to the address point.
+  uint64_t minBeforeBytes() const { return TM->Offset; }
+
+  // The minimum byte offset after the address point. This covers the bytes in
+  // the vtable object after the address point (e.g. the vtable for the current
+  // class and any later base classes) and is equal to the size of the vtable
+  // object minus the offset from the start of the vtable object to the address
+  // point.
+  uint64_t minAfterBytes() const { return TM->Bits->ObjectSize - TM->Offset; }
+
+  // The number of bytes allocated (for the vtable plus the byte array) before
+  // the address point.
+  uint64_t allocatedBeforeBytes() const {
+    return minBeforeBytes() + TM->Bits->Before.Bytes.size();
+  }
+
+  // The number of bytes allocated (for the vtable plus the byte array) after
+  // the address point.
+  uint64_t allocatedAfterBytes() const {
+    return minAfterBytes() + TM->Bits->After.Bytes.size();
+  }
+
+  // Set the bit at position Pos before the address point to RetVal.
+  void setBeforeBit(uint64_t Pos) {
+    assert(Pos >= 8 * minBeforeBytes());
+    TM->Bits->Before.setBit(Pos - 8 * minBeforeBytes(), RetVal);
+  }
+
+  // Set the bit at position Pos after the address point to RetVal.
+  void setAfterBit(uint64_t Pos) {
+    assert(Pos >= 8 * minAfterBytes());
+    TM->Bits->After.setBit(Pos - 8 * minAfterBytes(), RetVal);
+  }
+
+  // Set the bytes at position Pos before the address point to RetVal.
+  // Because the bytes in Before are stored in reverse order, we use the
+  // opposite endianness to the target.
+  void setBeforeBytes(uint64_t Pos, uint8_t Size) {
+    assert(Pos >= 8 * minBeforeBytes());
+    if (IsBigEndian)
+      TM->Bits->Before.setLE(Pos - 8 * minBeforeBytes(), RetVal, Size);
+    else
+      TM->Bits->Before.setBE(Pos - 8 * minBeforeBytes(), RetVal, Size);
+  }
+
+  // Set the bytes at position Pos after the address point to RetVal.
+  void setAfterBytes(uint64_t Pos, uint8_t Size) {
+    assert(Pos >= 8 * minAfterBytes());
+    if (IsBigEndian)
+      TM->Bits->After.setBE(Pos - 8 * minAfterBytes(), RetVal, Size);
+    else
+      TM->Bits->After.setLE(Pos - 8 * minAfterBytes(), RetVal, Size);
+  }
+};
+
+// Find the minimum offset that we may store a value of size Size bits at. If
+// IsAfter is set, look for an offset before the object, otherwise look for an
+// offset after the object.
+uint64_t findLowestOffset(ArrayRef<VirtualCallTarget> Targets, bool IsAfter,
+                          uint64_t Size);
+
+// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
+// given allocation offset before the vtable address. Stores the computed
+// byte/bit offset to OffsetByte/OffsetBit.
+void setBeforeReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
+                           uint64_t AllocBefore, unsigned BitWidth,
+                           int64_t &OffsetByte, uint64_t &OffsetBit);
+
+// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
+// given allocation offset after the vtable address. Stores the computed
+// byte/bit offset to OffsetByte/OffsetBit.
+void setAfterReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
+                          uint64_t AllocAfter, unsigned BitWidth,
+                          int64_t &OffsetByte, uint64_t &OffsetBit);
+
+} // end namespace wholeprogramdevirt
+
+struct WholeProgramDevirtPass : public PassInfoMixin<WholeProgramDevirtPass> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
new file mode 100644
index 0000000..6bd22dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -0,0 +1,61 @@
+//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the instcombine pass. This pass
+/// is suitable for use in the new pass manager. For a pass that works with the
+/// legacy pass manager, please look for \c createInstructionCombiningPass() in
+/// Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+
+namespace llvm {
+
+class InstCombinePass : public PassInfoMixin<InstCombinePass> {
+  InstCombineWorklist Worklist;
+  bool ExpensiveCombines;
+
+public:
+  static StringRef name() { return "InstCombinePass"; }
+
+  explicit InstCombinePass(bool ExpensiveCombines = true)
+      : ExpensiveCombines(ExpensiveCombines) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
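+
+// Usage sketch (illustrative), with the new pass manager:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(InstCombinePass(/*ExpensiveCombines=*/false));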
+
+/// \brief The legacy pass manager's instcombine pass.
+///
+/// This is a basic whole-function wrapper around the instcombine utility. It
+/// will try to combine all instructions in the function.
+class InstructionCombiningPass : public FunctionPass {
+  InstCombineWorklist Worklist;
+  const bool ExpensiveCombines;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  InstructionCombiningPass(bool ExpensiveCombines = true)
+      : FunctionPass(ID), ExpensiveCombines(ExpensiveCombines) {
+    initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+  bool runOnFunction(Function &F) override;
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
new file mode 100644
index 0000000..271e891
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -0,0 +1,109 @@
+//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+/// InstCombineWorklist - This is the worklist management logic for
+/// InstCombine.
+class InstCombineWorklist {
+  SmallVector<Instruction*, 256> Worklist;
+  DenseMap<Instruction*, unsigned> WorklistMap;
+
+public:
+  InstCombineWorklist() = default;
+
+  InstCombineWorklist(InstCombineWorklist &&) = default;
+  InstCombineWorklist &operator=(InstCombineWorklist &&) = default;
+
+  bool isEmpty() const { return Worklist.empty(); }
+
+  /// Add - Add the specified instruction to the worklist if it isn't already
+  /// in it.
+  void Add(Instruction *I) {
+    if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
+      DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+      Worklist.push_back(I);
+    }
+  }
+
+  void AddValue(Value *V) {
+    if (Instruction *I = dyn_cast<Instruction>(V))
+      Add(I);
+  }
+
+  /// AddInitialGroup - Add the specified batch of stuff in reverse order,
+  /// which should only be done when the worklist is empty and when the group
+  /// has no duplicates.
+  void AddInitialGroup(ArrayRef<Instruction *> List) {
+    assert(Worklist.empty() && "Worklist must be empty to add initial group");
+    Worklist.reserve(List.size()+16);
+    WorklistMap.reserve(List.size());
+    DEBUG(dbgs() << "IC: ADDING: " << List.size() << " instrs to worklist\n");
+    unsigned Idx = 0;
+    for (Instruction *I : reverse(List)) {
+      WorklistMap.insert(std::make_pair(I, Idx++));
+      Worklist.push_back(I);
+    }
+  }
+
+  // Remove - remove I from the worklist if it exists.
+  void Remove(Instruction *I) {
+    DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.
+    Worklist[It->second] = nullptr;
+
+    WorklistMap.erase(It);
+  }
+
+  Instruction *RemoveOne() {
+    Instruction *I = Worklist.pop_back_val();
+    WorklistMap.erase(I);
+    return I;
+  }
+
+  /// AddUsersToWorkList - When an instruction is simplified, add all users of
+  /// the instruction to the work lists because they might get more simplified
+  /// now.
+  ///
+  void AddUsersToWorkList(Instruction &I) {
+    for (User *U : I.users())
+      Add(cast<Instruction>(U));
+  }
+
+
+  /// Zap - check that the worklist is empty and nuke the backing store for
+  /// the map if it is large.
+  void Zap() {
+    assert(WorklistMap.empty() && "Worklist empty, but map not?");
+
+    // Do an explicit clear, this shrinks the map if needed.
+    WorklistMap.clear();
+  }
+};
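+
+// Typical driver loop (sketch; the visit step stands in for whatever
+// transformation the client performs):
+//
+//   while (!Worklist.isEmpty()) {
+//     Instruction *I = Worklist.RemoveOne();
+//     if (!I) continue; // slot was nulled out by Remove()
+//     // ... visit I, then Worklist.AddUsersToWorkList(*I) on simplification
+//   }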
+
+} // end namespace llvm.
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
new file mode 100644
index 0000000..b1e13f1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation.h
@@ -0,0 +1,211 @@
+//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines constructor functions for instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class FunctionPass;
+class ModulePass;
+class OptimizationRemarkEmitter;
+
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
+                                              BasicBlock::iterator IP);
+
+// Insert GCOV profiling instrumentation
+struct GCOVOptions {
+  static GCOVOptions getDefault();
+
+  // Specify whether to emit .gcno files.
+  bool EmitNotes;
+
+  // Specify whether to modify the program to emit .gcda files when run.
+  bool EmitData;
+
+  // A four-byte version string. The meaning of a version string is described
+  // in gcc's gcov-io.h.
+  char Version[4];
+
+  // Emit a "cfg checksum" that follows the "line number checksum" of a
+  // function. This affects both .gcno and .gcda files.
+  bool UseCfgChecksum;
+
+  // Add the 'noredzone' attribute to added runtime library calls.
+  bool NoRedZone;
+
+  // Emit the name of the function in the .gcda files. This is redundant, as
+  // the function identifier can be used to find the name from the .gcno file.
+  bool FunctionNamesInData;
+
+  // Emit the exit block immediately after the start block, rather than after
+  // all of the function body's blocks.
+  bool ExitBlockBeforeBody;
+};
+
+ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
+                                   GCOVOptions::getDefault());
+
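+// Usage sketch (illustrative): emit .gcno notes but no runtime .gcda output.
+// `PM` is assumed to be a legacy PassManager.
+//
+//   GCOVOptions Opts = GCOVOptions::getDefault();
+//   Opts.EmitData = false;
+//   PM.add(createGCOVProfilerPass(Opts));
+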
+// PGO Instrumentation
+ModulePass *createPGOInstrumentationGenLegacyPass();
+ModulePass *
+createPGOInstrumentationUseLegacyPass(StringRef Filename = StringRef(""));
+ModulePass *createPGOIndirectCallPromotionLegacyPass(bool InLTO = false,
+                                                     bool SamplePGO = false);
+FunctionPass *createPGOMemOPSizeOptLegacyPass();
+
+// The pgo-specific indirect call promotion function declared below is used by
+// the pgo-driven indirect call promotion and sample profile passes. It's a
+// wrapper around llvm::promoteCall, et al. that additionally computes !prof
+// metadata. We place it in a pgo namespace so it's not confused with the
+// generic utilities.
+namespace pgo {
+
+// Helper function that transforms Inst (either an indirect-call instruction or
+// an invoke instruction) to a conditional call to F. This is like:
+//     if (Inst.CalledValue == F)
+//        F(...);
+//     else
+//        Inst(...);
+//     end
+// TotalCount is the profile count of executions of the instruction.
+// Count is the profile count of executions where F is the target function.
+// These two values are used to update the branch weight.
+// If \p AttachProfToDirectCall is true, a prof metadata is attached to the
+// new direct call to contain \p Count.
+// Returns the promoted direct call instruction.
+Instruction *promoteIndirectCall(Instruction *Inst, Function *F, uint64_t Count,
+                                 uint64_t TotalCount,
+                                 bool AttachProfToDirectCall,
+                                 OptimizationRemarkEmitter *ORE);
+} // namespace pgo
+
+/// Options for the frontend instrumentation based profiling pass.
+struct InstrProfOptions {
+  // Add the 'noredzone' attribute to added runtime library calls.
+  bool NoRedZone = false;
+
+  // Do counter register promotion
+  bool DoCounterPromotion = false;
+
+  // Name of the profile file to use as output
+  std::string InstrProfileOutput;
+
+  InstrProfOptions() = default;
+};
+
+/// Insert frontend instrumentation based profiling.
+ModulePass *createInstrProfilingLegacyPass(
+    const InstrProfOptions &Options = InstrProfOptions());
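+
+// Usage sketch (illustrative): enable counter promotion and name the output
+// file. `PM` is assumed to be a legacy PassManager.
+//
+//   InstrProfOptions Opts;
+//   Opts.DoCounterPromotion = true;
+//   Opts.InstrProfileOutput = "default.profraw";
+//   PM.add(createInstrProfilingLegacyPass(Opts));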
+
+// Insert AddressSanitizer (address sanity checking) instrumentation
+FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
+                                                 bool Recover = false,
+                                                 bool UseAfterScope = false);
+ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
+                                             bool Recover = false,
+                                             bool UseGlobalsGC = true);
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
+                                        bool Recover = false);
+
+FunctionPass *createHWAddressSanitizerPass(bool Recover = false);
+
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerPass();
+
+// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
+ModulePass *createDataFlowSanitizerPass(
+    const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+    void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
+
+// Options for EfficiencySanitizer sub-tools.
+struct EfficiencySanitizerOptions {
+  enum Type {
+    ESAN_None = 0,
+    ESAN_CacheFrag,
+    ESAN_WorkingSet,
+  } ToolType = ESAN_None;
+
+  EfficiencySanitizerOptions() = default;
+};
+
+// Insert EfficiencySanitizer instrumentation.
+ModulePass *createEfficiencySanitizerPass(
+    const EfficiencySanitizerOptions &Options = EfficiencySanitizerOptions());
+
+// Options for sanitizer coverage instrumentation.
+struct SanitizerCoverageOptions {
+  enum Type {
+    SCK_None = 0,
+    SCK_Function,
+    SCK_BB,
+    SCK_Edge
+  } CoverageType = SCK_None;
+  bool IndirectCalls = false;
+  bool TraceBB = false;
+  bool TraceCmp = false;
+  bool TraceDiv = false;
+  bool TraceGep = false;
+  bool Use8bitCounters = false;
+  bool TracePC = false;
+  bool TracePCGuard = false;
+  bool Inline8bitCounters = false;
+  bool PCTable = false;
+  bool NoPrune = false;
+  bool StackDepth = false;
+
+  SanitizerCoverageOptions() = default;
+};
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createSanitizerCoverageModulePass(
+    const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
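+
+// Usage sketch (illustrative): edge coverage with inline 8-bit counters.
+// `PM` is assumed to be a legacy PassManager.
+//
+//   SanitizerCoverageOptions Opts;
+//   Opts.CoverageType = SanitizerCoverageOptions::SCK_Edge;
+//   Opts.Inline8bitCounters = true;
+//   PM.add(createSanitizerCoverageModulePass(Opts));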
+
+/// \brief Calculate what to divide by to scale counts.
+///
+/// Given the maximum count, calculate a divisor that will scale all the
+/// weights to strictly less than std::numeric_limits<uint32_t>::max().
+static inline uint64_t calculateCountScale(uint64_t MaxCount) {
+  return MaxCount < std::numeric_limits<uint32_t>::max()
+             ? 1
+             : MaxCount / std::numeric_limits<uint32_t>::max() + 1;
+}
+
+/// \brief Scale an individual branch count.
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
+  uint64_t Scaled = Count / Scale;
+  assert(Scaled <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
+  return Scaled;
+}
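+
+// Usage sketch (illustrative; Counts and MaxCount are assumed inputs):
+//
+//   uint64_t Scale = calculateCountScale(MaxCount);
+//   SmallVector<uint32_t, 4> Weights;
+//   for (uint64_t C : Counts)
+//     Weights.push_back(scaleBranchCount(C, Scale));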
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRUMENTATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
new file mode 100644
index 0000000..3d4f62c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/BoundsChecking.h
@@ -0,0 +1,29 @@
+//===- BoundsChecking.h - Bounds checking instrumentation -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass to instrument code and perform run-time bounds checking on loads,
+/// stores, and other memory intrinsics.
+struct BoundsCheckingPass : PassInfoMixin<BoundsCheckingPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+
+/// Legacy pass creation function for the above pass.
+FunctionPass *createBoundsCheckingLegacyPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
new file mode 100644
index 0000000..dd55fbe
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/GCOVProfiler.h
@@ -0,0 +1,31 @@
+//===- Transforms/Instrumentation/GCOVProfiler.h ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the GCOV style profiler pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_GCOVPROFILER_H
+#define LLVM_TRANSFORMS_GCOVPROFILER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Instrumentation.h"
+
+namespace llvm {
+/// The gcov-style instrumentation pass
+class GCOVProfilerPass : public PassInfoMixin<GCOVProfilerPass> {
+public:
+  GCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault())
+      : GCOVOpts(Options) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  GCOVOptions GCOVOpts;
+};
+
+} // End llvm namespace
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
new file mode 100644
index 0000000..13fb3db
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/InstrProfiling.h
@@ -0,0 +1,125 @@
+//===- Transforms/Instrumentation/InstrProfiling.h --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's PGO Instrumentation lowering
+/// pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRPROFILING_H
+#define LLVM_TRANSFORMS_INSTRPROFILING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <vector>
+
+namespace llvm {
+
+class TargetLibraryInfo;
+using LoadStorePair = std::pair<Instruction *, Instruction *>;
+
+/// Instrumentation based profiling lowering pass. This pass lowers
+/// the profile instrumented code generated by FE or the IR based
+/// instrumentation pass.
+class InstrProfiling : public PassInfoMixin<InstrProfiling> {
+public:
+  InstrProfiling() = default;
+  InstrProfiling(const InstrProfOptions &Options) : Options(Options) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  bool run(Module &M, const TargetLibraryInfo &TLI);
+
+private:
+  InstrProfOptions Options;
+  Module *M;
+  Triple TT;
+  const TargetLibraryInfo *TLI;
+  struct PerFunctionProfileData {
+    uint32_t NumValueSites[IPVK_Last + 1];
+    GlobalVariable *RegionCounters = nullptr;
+    GlobalVariable *DataVar = nullptr;
+
+    PerFunctionProfileData() {
+      memset(NumValueSites, 0, sizeof(uint32_t) * (IPVK_Last + 1));
+    }
+  };
+  DenseMap<GlobalVariable *, PerFunctionProfileData> ProfileDataMap;
+  std::vector<GlobalValue *> UsedVars;
+  std::vector<GlobalVariable *> ReferencedNames;
+  GlobalVariable *NamesVar;
+  size_t NamesSize;
+
+  // vector of counter load/store pairs to be register promoted.
+  std::vector<LoadStorePair> PromotionCandidates;
+
+  // The start value of precise value profile range for memory intrinsic sizes.
+  int64_t MemOPSizeRangeStart;
+  // The end value of precise value profile range for memory intrinsic sizes.
+  int64_t MemOPSizeRangeLast;
+
+  int64_t TotalCountersPromoted = 0;
+
+  /// Lower instrumentation intrinsics in the function. Returns true if there
+  /// was any lowering.
+  bool lowerIntrinsics(Function *F);
+
+  /// Register-promote counter loads and stores in loops.
+  void promoteCounterLoadStores(Function *F);
+
+  /// Returns true if profile counter update register promotion is enabled.
+  bool isCounterPromotionEnabled() const;
+
+  /// Count the number of instrumented value sites for the function.
+  void computeNumValueSiteCounts(InstrProfValueProfileInst *Ins);
+
+  /// Replace instrprof_value_profile with a call to runtime library.
+  void lowerValueProfileInst(InstrProfValueProfileInst *Ins);
+
+  /// Replace instrprof_increment with an increment of the appropriate value.
+  void lowerIncrement(InstrProfIncrementInst *Inc);
+
+  /// Force emitting of name vars for unused functions.
+  void lowerCoverageData(GlobalVariable *CoverageNamesVar);
+
+  /// Get the region counters for an increment, creating them if necessary.
+  ///
+  /// If the counter array doesn't yet exist, the profile data variables
+  /// referring to them will also be created.
+  GlobalVariable *getOrCreateRegionCounters(InstrProfIncrementInst *Inc);
+
+  /// Emit the section with compressed function names.
+  void emitNameData();
+
+  /// Emit value nodes section for value profiling.
+  void emitVNodes();
+
+  /// Emit runtime registration functions for each profile data variable.
+  void emitRegistration();
+
+  /// Emit the necessary plumbing to pull in the runtime initialization.
+  /// Returns true if a change was made.
+  bool emitRuntimeHook();
+
+  /// Add uses of our data variables and runtime hook.
+  void emitUses();
+
+  /// Create a static initializer for our data, on platforms that need it,
+  /// and for any profile output file that was specified.
+  void emitInitialization();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_INSTRPROFILING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h b/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
new file mode 100644
index 0000000..c0b37c4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
@@ -0,0 +1,75 @@
+//===- Transforms/Instrumentation/PGOInstrumentation.h ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for IR based instrumentation passes
+/// (profile-gen and profile-use).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
+#define LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class Function;
+class Instruction;
+class Module;
+
+/// The instrumentation (profile-instr-gen) pass for IR based PGO.
+class PGOInstrumentationGen : public PassInfoMixin<PGOInstrumentationGen> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+/// The profile annotation (profile-instr-use) pass for IR based PGO.
+class PGOInstrumentationUse : public PassInfoMixin<PGOInstrumentationUse> {
+public:
+  PGOInstrumentationUse(std::string Filename = "");
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  std::string ProfileFileName;
+};
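+
+// Usage sketch (illustrative; "default.profdata" is a hypothetical profile):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(PGOInstrumentationGen());
+//   // or, for the use phase:
+//   MPM.addPass(PGOInstrumentationUse("default.profdata"));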
+
+/// The indirect function call promotion pass.
+class PGOIndirectCallPromotion : public PassInfoMixin<PGOIndirectCallPromotion> {
+public:
+  PGOIndirectCallPromotion(bool IsInLTO = false, bool SamplePGO = false)
+      : InLTO(IsInLTO), SamplePGO(SamplePGO) {}
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+  bool InLTO;
+  bool SamplePGO;
+};
+
+/// The profile size based optimization pass for memory intrinsics.
+class PGOMemOPSizeOpt : public PassInfoMixin<PGOMemOPSizeOpt> {
+public:
+  PGOMemOPSizeOpt() = default;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+void setProfMetadata(Module *M, Instruction *TI, ArrayRef<uint64_t> EdgeCounts,
+                     uint64_t MaxCount);
+
+void setIrrLoopHeaderMetadata(Module *M, Instruction *TI, uint64_t Count);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_PGOINSTRUMENTATION_H
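+
+// Usage sketch (illustrative comment, not part of the upstream header): a
+// minimal gen/use pairing with the new pass manager, assuming a registered
+// ModuleAnalysisManager `MAM`; the profile file name is a placeholder.
+//
+//   ModulePassManager GenPM;
+//   GenPM.addPass(PGOInstrumentationGen());   // instrument for profiling
+//   GenPM.run(M, MAM);
+//   // ...run the instrumented program, merge the raw profiles, then:
+//   ModulePassManager UsePM;
+//   UsePM.addPass(PGOInstrumentationUse("default.profdata"));
+//   UsePM.run(M, MAM);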
diff --git a/linux-x64/clang/include/llvm/Transforms/ObjCARC.h b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
new file mode 100644
index 0000000..1897adc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/ObjCARC.h
@@ -0,0 +1,48 @@
+//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the ObjCARC Scalar Transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_OBJCARC_H
+#define LLVM_TRANSFORMS_OBJCARC_H
+
+namespace llvm {
+
+class Pass;
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCAPElim - ObjC ARC autorelease pool elimination.
+//
+Pass *createObjCARCAPElimPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCExpand - ObjC ARC preliminary simplifications.
+//
+Pass *createObjCARCExpandPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCContract - Late ObjC ARC cleanups.
+//
+Pass *createObjCARCContractPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCOpt - ObjC ARC optimization.
+//
+Pass *createObjCARCOptPass();
+
+} // End llvm namespace
+
+#endif
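+
+// Usage sketch (illustrative comment, not part of the upstream header): these
+// accessors are typically scheduled with the legacy pass manager, e.g.:
+//
+//   legacy::PassManager PM;
+//   PM.add(createObjCARCExpandPass());    // early simplifications
+//   PM.add(createObjCARCOptPass());       // main ARC optimization
+//   PM.add(createObjCARCContractPass());  // late cleanups
+//   PM.run(M);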
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar.h b/linux-x64/clang/include/llvm/Transforms/Scalar.h
new file mode 100644
index 0000000..84c7bd4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar.h
@@ -0,0 +1,497 @@
+//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Scalar transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_H
+#define LLVM_TRANSFORMS_SCALAR_H
+
+#include <functional>
+
+namespace llvm {
+
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class ModulePass;
+class Pass;
+class GetElementPtrInst;
+class PassInfo;
+class TerminatorInst;
+class TargetLowering;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantPropagation - A worklist driven constant propagation pass
+//
+FunctionPass *createConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AlignmentFromAssumptions - Use assume intrinsics to set load/store
+// alignments.
+//
+FunctionPass *createAlignmentFromAssumptionsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SCCP - Sparse conditional constant propagation.
+//
+FunctionPass *createSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadInstElimination - This pass quickly removes trivially dead instructions
+// without modifying the CFG of the function.  It is a BasicBlockPass, so it
+// runs efficiently when queued next to other BasicBlockPasses.
+//
+Pass *createDeadInstEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadCodeElimination - This pass is more powerful than DeadInstElimination,
+// because it is worklist driven and can potentially revisit instructions when
+// the instructions that use them become dead, eliminating chains of dead
+// computations.
+//
+FunctionPass *createDeadCodeEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadStoreElimination - This pass deletes stores that are post-dominated by
+// must-aliased stores and are not loaded or used between the stores.
+//
+FunctionPass *createDeadStoreEliminationPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// CallSiteSplitting - This pass splits call sites based on their known
+// argument values.
+FunctionPass *createCallSiteSplittingPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm.  This
+// algorithm assumes instructions are dead until proven otherwise, which makes
+// it more successful at removing non-obviously dead instructions.
+//
+FunctionPass *createAggressiveDCEPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// GuardWidening - An optimization over the @llvm.experimental.guard intrinsic
+// that (optimistically) combines multiple guards into one to have fewer checks
+// at runtime.
+//
+FunctionPass *createGuardWideningPass();
+
+
+//===----------------------------------------------------------------------===//
+//
+// BitTrackingDCE - This pass uses a bit-tracking DCE algorithm in order to
+// remove computations of dead bits.
+//
+FunctionPass *createBitTrackingDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductiveRangeCheckElimination - Transform loops to elide range checks on
+// linear functions of the induction variable.
+//
+Pass *createInductiveRangeCheckEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductionVariableSimplify - Transform induction variables in a program to all
+// use a single canonical induction variable per loop.
+//
+Pass *createIndVarSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionCombining - Combine instructions to form fewer, simple
+// instructions. This pass does not modify the CFG, and has a tendency to make
+// instructions dead, so a subsequent DCE pass is useful.
+//
+// This pass combines things like:
+//    %Y = add int 1, %X
+//    %Z = add int 1, %Y
+// into:
+//    %Z = add int 2, %X
+//
+FunctionPass *createInstructionCombiningPass(bool ExpensiveCombines = true);
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveInstCombiner - Combine expression patterns to form expressions with
+// fewer, simple instructions. This pass does not modify the CFG.
+//
+FunctionPass *createAggressiveInstCombinerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LICM - This pass is a loop invariant code motion and memory promotion pass.
+//
+Pass *createLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSink - This pass sinks invariants from the preheader to blocks inside
+// the loop body whose execution frequency is lower than the preheader's.
+//
+Pass *createLoopSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopPredication - This pass does loop predication on guards.
+//
+Pass *createLoopPredicationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInterchange - This pass interchanges loops to provide more
+// cache-friendly memory access patterns.
+//
+Pass *createLoopInterchangePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopStrengthReduce - This pass strength-reduces GEP instructions that use
+// a loop's canonical induction variable as one of their indices.
+//
+Pass *createLoopStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnswitch - This pass is a simple loop unswitching pass.
+//
+Pass *createLoopUnswitchPass(bool OptimizeForSize = false,
+                             bool hasBranchDivergence = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnroll - This pass is a simple loop unrolling pass.
+//
+Pass *createLoopUnrollPass(int OptLevel = 2, int Threshold = -1, int Count = -1,
+                           int AllowPartial = -1, int Runtime = -1,
+                           int UpperBound = -1, int AllowPeeling = -1);
+// Create an unrolling pass for full unrolling that uses exact trip count only.
+Pass *createSimpleLoopUnrollPass(int OptLevel = 2);
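+//
+// For example (illustrative, not in the original header): the -1 defaults mean
+// "use the internal default"; forcing partial unrolling with a custom
+// threshold might look like:
+//
+//   Pass *P = createLoopUnrollPass(/*OptLevel=*/2, /*Threshold=*/150,
+//                                  /*Count=*/-1, /*AllowPartial=*/1);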
+
+//===----------------------------------------------------------------------===//
+//
+// LoopReroll - This pass is a simple loop rerolling pass.
+//
+Pass *createLoopRerollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopRotate - This pass is a simple loop rotating pass.
+//
+Pass *createLoopRotatePass(int MaxHeaderSize = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopIdiom - This pass recognizes and replaces idioms in loops.
+//
+Pass *createLoopIdiomPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVersioningLICM - This pass is a loop versioning pass for LICM.
+//
+Pass *createLoopVersioningLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory
+// references. It basically undoes the PromoteMemoryToRegister pass to make CFG
+// hacking easier.
+//
+FunctionPass *createDemoteRegisterToMemoryPass();
+extern char &DemoteRegisterToMemoryID;
+
+//===----------------------------------------------------------------------===//
+//
+// Reassociate - This pass reassociates commutative expressions in an order that
+// is designed to promote better constant propagation, GCSE, LICM, PRE...
+//
+// For example:  4 + (x + 5)  ->  x + (4 + 5)
+//
+FunctionPass *createReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// JumpThreading - Thread control through multi-pred/multi-succ blocks where some
+// preds always go to some succ. Thresholds other than minus one override the
+// internal BB duplication default threshold.
+//
+FunctionPass *createJumpThreadingPass(int Threshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
+// simplify terminator instructions, convert switches to lookup tables, etc.
+//
+FunctionPass *createCFGSimplificationPass(
+    unsigned Threshold = 1, bool ForwardSwitchCond = false,
+    bool ConvertSwitch = false, bool KeepLoops = true, bool SinkCommon = false,
+    std::function<bool(const Function &)> Ftor = nullptr);
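+//
+// For example (illustrative, not in the original header): a call enabling
+// switch-to-lookup-table conversion while keeping the other defaults might
+// look like:
+//
+//   FunctionPass *P = createCFGSimplificationPass(
+//       /*Threshold=*/1, /*ForwardSwitchCond=*/false, /*ConvertSwitch=*/true);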
+
+//===----------------------------------------------------------------------===//
+//
+// FlattenCFG - Flatten the CFG, reducing the number of conditional branches by
+// using parallel-and and parallel-or modes, etc.
+//
+FunctionPass *createFlattenCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// CFG Structurization - Remove irreducible control flow
+//
+//
+// When \p SkipUniformRegions is true the structurizer will not structurize
+// regions that only contain uniform branches.
+Pass *createStructurizeCFGPass(bool SkipUniformRegions = false);
+
+//===----------------------------------------------------------------------===//
+//
+// TailCallElimination - This pass eliminates call instructions to the current
+// function which occur immediately before return instructions.
+//
+FunctionPass *createTailCallEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
+// tree.
+//
+FunctionPass *createEarlyCSEPass(bool UseMemorySSA = false);
+
+//===----------------------------------------------------------------------===//
+//
+// GVNHoist - This pass performs a simple and fast GVN pass over the dominator
+// tree to hoist common expressions from sibling branches.
+//
+FunctionPass *createGVNHoistPass();
+
+//===----------------------------------------------------------------------===//
+//
+// GVNSink - This pass uses an "inverted" value numbering to decide the
+// similarity of expressions and sinks similar expressions into successors.
+//
+FunctionPass *createGVNSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
+// are hoisted into the header, while stores sink into the footer.
+//
+FunctionPass *createMergedLoadStoreMotionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NewGVN - This pass performs global value numbering and redundant load
+// elimination simultaneously.
+//
+FunctionPass *createNewGVNPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DivRemPairs - Hoist/decompose integer division and remainder instructions.
+//
+FunctionPass *createDivRemPairsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MemCpyOpt - This pass performs optimizations related to eliminating memcpy
+// calls and/or combining multiple stores into memset's.
+//
+FunctionPass *createMemCpyOptPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDeletion - This pass performs DCE of non-infinite loops that it
+// can prove are dead.
+//
+Pass *createLoopDeletionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantHoisting - This pass prepares a function for expensive constants.
+//
+FunctionPass *createConstantHoistingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Sink - Code Sinking
+//
+FunctionPass *createSinkingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
+//
+Pass *createLowerAtomicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerGuardIntrinsic - Lower guard intrinsics to normal control flow.
+//
+Pass *createLowerGuardIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergeICmps - Merge integer comparison chains into a memcmp
+//
+Pass *createMergeICmpsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ValuePropagation - Propagate CFG-derived value information
+//
+Pass *createCorrelatedValuePropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InferAddressSpaces - Modify users of addrspacecast instructions with values
+// in the source address space if using the destination address space is slower
+// on the target.
+//
+FunctionPass *createInferAddressSpacesPass();
+extern char &InferAddressSpacesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
+// "block_weights" metadata.
+FunctionPass *createLowerExpectIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PartiallyInlineLibCalls - Tries to inline the fast path of library
+// calls such as sqrt.
+//
+FunctionPass *createPartiallyInlineLibCallsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarizerPass - Converts vector operations into scalar operations
+//
+FunctionPass *createScalarizerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *createSeparateConstOffsetFromGEPPass(bool LowerGEP = false);
+
+//===----------------------------------------------------------------------===//
+//
+// SpeculativeExecution - Aggressively hoist instructions to enable
+// speculative execution on targets where branches are expensive.
+//
+FunctionPass *createSpeculativeExecutionPass();
+
+// Same as createSpeculativeExecutionPass, but does nothing unless
+// TargetTransformInfo::hasBranchDivergence() is true.
+FunctionPass *createSpeculativeExecutionIfHasBranchDivergencePass();
+
+//===----------------------------------------------------------------------===//
+//
+// StraightLineStrengthReduce - This pass strength-reduces certain instruction
+// patterns in straight-line code.
+//
+FunctionPass *createStraightLineStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// PlaceSafepoints - Rewrite any IR calls to gc.statepoints and insert any
+// safepoint polls (method entry, backedge) that might be required.  This pass
+// does not generate explicit relocation sequences - that's handled by
+// RewriteStatepointsForGC which can be run at an arbitrary point in the pass
+// order following this pass.
+//
+FunctionPass *createPlaceSafepointsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// RewriteStatepointsForGC - Rewrite any gc.statepoints which do not yet have
+// explicit relocations to include explicit relocations.
+//
+ModulePass *createRewriteStatepointsForGCLegacyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Float2Int - Demote floats to ints where possible.
+//
+FunctionPass *createFloat2IntPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NaryReassociate - Simplify n-ary operations by reassociation.
+//
+FunctionPass *createNaryReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDistribute - Distribute loops.
+//
+FunctionPass *createLoopDistributePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopLoadElimination - Perform loop-aware load elimination.
+//
+FunctionPass *createLoopLoadEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVersioning - Perform loop multi-versioning.
+//
+FunctionPass *createLoopVersioningPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDataPrefetch - Perform data prefetching in loops.
+//
+FunctionPass *createLoopDataPrefetchPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NameAnonGlobal - Give anonymous global values names so that they can be
+// referenced by other modules (e.g. for ThinLTO).
+//
+ModulePass *createNameAnonGlobalPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LibCallsShrinkWrap - Shrink-wraps a call to a library function if the result
+// is not used.
+//
+FunctionPass *createLibCallsShrinkWrapPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplifyCFG - This pass performs basic CFG simplification on loops,
+// primarily to help other loop passes.
+//
+Pass *createLoopSimplifyCFGPass();
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h
new file mode 100644
index 0000000..f98af62
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ADCE.h
@@ -0,0 +1,38 @@
+//===- ADCE.h - Aggressive dead code elimination ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Aggressive Dead Code Elimination
+// pass. This pass optimistically assumes that all instructions are dead until
+// proven otherwise, allowing it to eliminate dead computations that other DCE
+// passes do not catch, particularly involving loop computations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
+#define LLVM_TRANSFORMS_SCALAR_ADCE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// A DCE pass that assumes instructions are dead until proven otherwise.
+///
+/// This pass eliminates dead code by optimistically assuming that all
+/// instructions are dead until proven otherwise. This allows it to eliminate
+/// dead computations that other DCE passes do not catch, particularly involving
+/// loop computations.
+struct ADCEPass : PassInfoMixin<ADCEPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
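+
+// Usage sketch (illustrative comment, not part of the upstream header):
+// running ADCE over a single function with the new pass manager, assuming a
+// registered FunctionAnalysisManager `FAM`:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(ADCEPass());
+//   FPM.run(F, FAM);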
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
new file mode 100644
index 0000000..6197503
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h
@@ -0,0 +1,45 @@
+//===---- AlignmentFromAssumptions.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a ScalarEvolution-based transformation to set
+// the alignments of load, stores and memory intrinsics based on the truth
+// expressions of assume intrinsics. The primary motivation is to handle
+// complex alignment assumptions that apply to vector loads and stores that
+// appear after vectorization and unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
+#define LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
+
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct AlignmentFromAssumptionsPass
+    : public PassInfoMixin<AlignmentFromAssumptionsPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, AssumptionCache &AC, ScalarEvolution *SE_,
+               DominatorTree *DT_);
+
+  ScalarEvolution *SE = nullptr;
+  DominatorTree *DT = nullptr;
+
+  bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
+                            const SCEV *&OffSCEV);
+  bool processAssumption(CallInst *I);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h
new file mode 100644
index 0000000..d7d2730
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/BDCE.h
@@ -0,0 +1,31 @@
+//===---- BDCE.cpp - Bit-tracking dead code elimination ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Bit-Tracking Dead Code Elimination pass. Some
+// instructions (shifts, some ands, ors, etc.) kill some of their input bits.
+// We track these dead bits and remove instructions that compute only these
+// dead bits.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_BDCE_H
+#define LLVM_TRANSFORMS_SCALAR_BDCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+// The Bit-Tracking Dead Code Elimination pass.
+struct BDCEPass : PassInfoMixin<BDCEPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_BDCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
new file mode 100644
index 0000000..5ab951a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/CallSiteSplitting.h
@@ -0,0 +1,29 @@
+//===- CallSiteSplitting.h - Callsite Splitting -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
+#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+struct CallSiteSplittingPass : PassInfoMixin<CallSiteSplittingPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING__H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
new file mode 100644
index 0000000..d3322dc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/ConstantHoisting.h
@@ -0,0 +1,167 @@
+//==- ConstantHoisting.h - Prepare code for expensive constants --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies expensive constants to hoist and coalesces them to
+// better prepare the function for SelectionDAG-based code generation. This
+// works around the limitations of the basic-block-at-a-time approach.
+//
+// First it scans all instructions for integer constants and calculates their
+// cost. If the constant can be folded into the instruction (the cost is
+// TCC_Free) or the cost is just a simple operation (TCC_Basic), then we don't
+// consider it expensive and leave it alone. This is the default behavior and
+// the default implementation of getIntImmCost will always return TCC_Free.
+//
+// If the cost is more than TCC_Basic, then the integer constant can't be folded
+// into the instruction and it might be beneficial to hoist the constant.
+// Similar constants are coalesced to reduce register pressure and
+// materialization code.
+//
+// When a constant is hoisted, it is also hidden behind a bitcast to force it to
+// be live-out of the basic block. Otherwise the constant would be just
+// duplicated and each basic block would have its own copy in the SelectionDAG.
+// The SelectionDAG recognizes such constants as opaque and doesn't perform
+// certain transformations on them, which would create a new expensive constant.
+//
+// This optimization is only applied to integer constants in instructions and
+// simple (this means not nested) constant cast expressions. For example:
+// %0 = load i64* inttoptr (i64 big_constant to i64*)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
+#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include <algorithm>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockFrequencyInfo;
+class Constant;
+class ConstantInt;
+class DominatorTree;
+class Function;
+class Instruction;
+class TargetTransformInfo;
+
+/// A private "module" namespace for types and utilities used by
+/// ConstantHoisting. These are implementation details and should not be used by
+/// clients.
+namespace consthoist {
+
+/// \brief Keeps track of the user of a constant and the operand index where the
+/// constant is used.
+struct ConstantUser {
+  Instruction *Inst;
+  unsigned OpndIdx;
+
+  ConstantUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) {}
+};
+
+using ConstantUseListType = SmallVector<ConstantUser, 8>;
+
+/// \brief Keeps track of a constant candidate and its uses.
+struct ConstantCandidate {
+  ConstantUseListType Uses;
+  ConstantInt *ConstInt;
+  unsigned CumulativeCost = 0;
+
+  ConstantCandidate(ConstantInt *ConstInt) : ConstInt(ConstInt) {}
+
+  /// \brief Add the user to the use list and update the cost.
+  void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) {
+    CumulativeCost += Cost;
+    Uses.push_back(ConstantUser(Inst, Idx));
+  }
+};
+
+/// \brief This represents a constant that has been rebased with respect to a
+/// base constant. The difference to the base constant is recorded in Offset.
+struct RebasedConstantInfo {
+  ConstantUseListType Uses;
+  Constant *Offset;
+
+  RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset)
+    : Uses(std::move(Uses)), Offset(Offset) {}
+};
+
+using RebasedConstantListType = SmallVector<RebasedConstantInfo, 4>;
+
+/// \brief A base constant and all its rebased constants.
+struct ConstantInfo {
+  ConstantInt *BaseConstant;
+  RebasedConstantListType RebasedConstants;
+};
+
+} // end namespace consthoist
+
+class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
+               BlockFrequencyInfo *BFI, BasicBlock &Entry);
+
+  void releaseMemory() {
+    ConstantVec.clear();
+    ClonedCastMap.clear();
+    ConstCandVec.clear();
+  }
+
+private:
+  using ConstCandMapType = DenseMap<ConstantInt *, unsigned>;
+  using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
+
+  const TargetTransformInfo *TTI;
+  DominatorTree *DT;
+  BlockFrequencyInfo *BFI;
+  BasicBlock *Entry;
+
+  /// Keeps track of constant candidates found in the function.
+  ConstCandVecType ConstCandVec;
+
+  /// Keep track of cast instructions we already cloned.
+  SmallDenseMap<Instruction *, Instruction *> ClonedCastMap;
+
+  /// These are the final constants we decided to hoist.
+  SmallVector<consthoist::ConstantInfo, 8> ConstantVec;
+
+  Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
+  SmallPtrSet<Instruction *, 8>
+  findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst, unsigned Idx,
+                                 ConstantInt *ConstInt);
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst, unsigned Idx);
+  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
+                                 Instruction *Inst);
+  void collectConstantCandidates(Function &Fn);
+  void findAndMakeBaseConstant(ConstCandVecType::iterator S,
+                               ConstCandVecType::iterator E);
+  unsigned maximizeConstantsInRange(ConstCandVecType::iterator S,
+                                    ConstCandVecType::iterator E,
+                                    ConstCandVecType::iterator &MaxCostItr);
+  void findBaseConstants();
+  void emitBaseConstants(Instruction *Base, Constant *Offset,
+                         const consthoist::ConstantUser &ConstUser);
+  bool emitBaseConstants();
+  void deleteDeadCastInst() const;
+  bool optimizeConstants(Function &Fn);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
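+
+// Usage sketch (illustrative comment, not part of the upstream header): with
+// the new pass manager the run() entry point is expected to gather its
+// analyses (TTI, DT, and optionally BFI, per the runImpl glue above) from the
+// analysis manager, so scheduling is just:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(ConstantHoistingPass());
+//   FPM.run(F, FAM);   // FAM: a registered FunctionAnalysisManager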
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h b/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h
new file mode 100644
index 0000000..2093069
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/CorrelatedValuePropagation.h
@@ -0,0 +1,26 @@
+//===- CorrelatedValuePropagation.h -----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
+#define LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct CorrelatedValuePropagationPass
+    : PassInfoMixin<CorrelatedValuePropagationPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
new file mode 100644
index 0000000..273346c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DCE.h
@@ -0,0 +1,29 @@
+//===- DCE.h - Dead code elimination ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Dead Code Elimination pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DCE_H
+#define LLVM_TRANSFORMS_SCALAR_DCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Basic Dead Code Elimination pass.
+class DCEPass : public PassInfoMixin<DCEPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_DCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h
new file mode 100644
index 0000000..cfeb218
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DeadStoreElimination.h
@@ -0,0 +1,36 @@
+//===- DeadStoreElimination.h - Fast Dead Store Elimination -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a trivial dead store elimination that only considers
+// basic-block local redundant stores.
+//
+// FIXME: This should eventually be extended to be a post-dominator tree
+// traversal.  Doing so would be pretty trivial.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// This class implements a trivial dead store elimination. We consider
+/// only the redundant stores that are local to a single Basic Block.
+class DSEPass : public PassInfoMixin<DSEPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h b/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h
new file mode 100644
index 0000000..0a4346f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/DivRemPairs.h
@@ -0,0 +1,31 @@
+//===- DivRemPairs.h - Hoist/decompose integer division and remainder -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists and/or decomposes integer division and remainder
+// instructions to enable CFG improvements and better codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+#define LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Hoist/decompose integer division and remainder instructions to enable CFG
+/// improvements and better codegen.
+struct DivRemPairsPass : public PassInfoMixin<DivRemPairsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+}
+#endif // LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h b/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h
new file mode 100644
index 0000000..dca3b2d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -0,0 +1,42 @@
+//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for a simple, fast CSE pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+struct EarlyCSEPass : PassInfoMixin<EarlyCSEPass> {
+  EarlyCSEPass(bool UseMemorySSA = false) : UseMemorySSA(UseMemorySSA) {}
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  bool UseMemorySSA;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
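+
+// Usage sketch (illustrative comment, not part of the upstream header): the
+// UseMemorySSA flag selects the MemorySSA-backed variant, mirroring
+// createEarlyCSEPass(bool) in Scalar.h:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(EarlyCSEPass(/*UseMemorySSA=*/true));
+//   FPM.run(F, FAM);   // FAM: a registered FunctionAnalysisManager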
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
new file mode 100644
index 0000000..206ee98
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Float2Int.h
@@ -0,0 +1,51 @@
+//===-- Float2Int.h - Demote floating point ops to work on integers -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the Float2Int pass, which aims to demote floating
+// point operations to work on integers, where that is losslessly possible.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
+#define LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
+
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Float2IntPass : public PassInfoMixin<Float2IntPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F);
+
+private:
+  void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
+  void seen(Instruction *I, ConstantRange R);
+  ConstantRange badRange();
+  ConstantRange unknownRange();
+  ConstantRange validateRange(ConstantRange R);
+  void walkBackwards(const SmallPtrSetImpl<Instruction *> &Roots);
+  void walkForwards();
+  bool validateAndTransform();
+  Value *convert(Instruction *I, Type *ToTy);
+  void cleanup();
+
+  MapVector<Instruction *, ConstantRange> SeenInsts;
+  SmallPtrSet<Instruction *, 8> Roots;
+  EquivalenceClasses<Instruction *> ECs;
+  MapVector<Instruction *, Value *> ConvertedInsts;
+  LLVMContext *Ctx;
+};
+}
+#endif // LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
new file mode 100644
index 0000000..440d3f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVN.h
@@ -0,0 +1,310 @@
+//===- GVN.h - Eliminate redundant values and loads -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Global Value Numbering pass
+/// which eliminates fully redundant instructions. It also performs somewhat
+/// ad hoc PRE and dead load elimination.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GVN_H
+#define LLVM_TRANSFORMS_SCALAR_GVN_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Transforms/Utils/OrderedInstructions.h"
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class BranchInst;
+class CallInst;
+class Constant;
+class ExtractValueInst;
+class Function;
+class FunctionPass;
+class IntrinsicInst;
+class LoadInst;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class TargetLibraryInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by GVN. These
+/// are implementation details and should not be used by clients.
+namespace gvn LLVM_LIBRARY_VISIBILITY {
+
+struct AvailableValue;
+struct AvailableValueInBlock;
+class GVNLegacyPass;
+
+} // end namespace gvn
+
+/// The core GVN pass object.
+///
+/// FIXME: We should have a good summary of the GVN algorithm implemented by
+/// this particular pass here.
+class GVN : public PassInfoMixin<GVN> {
+public:
+  struct Expression;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  /// This removes the specified instruction from
+  /// our various maps and marks it for deletion.
+  void markInstructionForDeletion(Instruction *I) {
+    VN.erase(I);
+    InstrsToErase.push_back(I);
+  }
+
+  DominatorTree &getDominatorTree() const { return *DT; }
+  AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
+  MemoryDependenceResults &getMemDep() const { return *MD; }
+
+  /// This class holds the mapping between values and value numbers.  It is used
+  /// as an efficient mechanism to determine the expression-wise equivalence of
+  /// two values.
+  class ValueTable {
+    DenseMap<Value *, uint32_t> valueNumbering;
+    DenseMap<Expression, uint32_t> expressionNumbering;
+
+    // Expressions is the vector of Expression. ExprIdx is the mapping from
+    // value number to the index of Expression in Expressions. We use it
+    // instead of a DenseMap because filling such mapping is faster than
+    // filling a DenseMap and the compile time is a little better.
+    uint32_t nextExprNumber;
+
+    std::vector<Expression> Expressions;
+    std::vector<uint32_t> ExprIdx;
+
+    // Value number to PHINode mapping. Used for phi-translate in scalarpre.
+    DenseMap<uint32_t, PHINode *> NumberingPhi;
+
+    // Cache for phi-translate in scalarpre.
+    using PhiTranslateMap =
+        DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>;
+    PhiTranslateMap PhiTranslateTable;
+
+    AliasAnalysis *AA;
+    MemoryDependenceResults *MD;
+    DominatorTree *DT;
+
+    uint32_t nextValueNumber = 1;
+
+    Expression createExpr(Instruction *I);
+    Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
+                             Value *LHS, Value *RHS);
+    Expression createExtractvalueExpr(ExtractValueInst *EI);
+    uint32_t lookupOrAddCall(CallInst *C);
+    uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
+                              uint32_t Num, GVN &Gvn);
+    std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
+    bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
+
+  public:
+    ValueTable();
+    ValueTable(const ValueTable &Arg);
+    ValueTable(ValueTable &&Arg);
+    ~ValueTable();
+
+    uint32_t lookupOrAdd(Value *V);
+    uint32_t lookup(Value *V, bool Verify = true) const;
+    uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
+                            Value *LHS, Value *RHS);
+    uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
+                          uint32_t Num, GVN &Gvn);
+    void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
+    bool exists(Value *V) const;
+    void add(Value *V, uint32_t num);
+    void clear();
+    void erase(Value *v);
+    void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
+    AliasAnalysis *getAliasAnalysis() const { return AA; }
+    void setMemDep(MemoryDependenceResults *M) { MD = M; }
+    void setDomTree(DominatorTree *D) { DT = D; }
+    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
+    void verifyRemoved(const Value *) const;
+  };
+
+private:
+  friend class gvn::GVNLegacyPass;
+  friend struct DenseMapInfo<Expression>;
+
+  MemoryDependenceResults *MD;
+  DominatorTree *DT;
+  const TargetLibraryInfo *TLI;
+  AssumptionCache *AC;
+  SetVector<BasicBlock *> DeadBlocks;
+  OptimizationRemarkEmitter *ORE;
+  // Maps a block to the topmost instruction with implicit control flow in it.
+  DenseMap<const BasicBlock *, const Instruction *>
+      FirstImplicitControlFlowInsts;
+
+  OrderedInstructions *OI;
+  ValueTable VN;
+
+  /// A mapping from value numbers to lists of Value*'s that
+  /// have that value number.  Use findLeader to query it.
+  struct LeaderTableEntry {
+    Value *Val;
+    const BasicBlock *BB;
+    LeaderTableEntry *Next;
+  };
+  DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
+  BumpPtrAllocator TableAllocator;
+
+  // Block-local map of equivalent values to their leader; it does not
+  // propagate to any successors. Entries added mid-block are applied
+  // to the remaining instructions in the block.
+  SmallMapVector<Value *, Constant *, 4> ReplaceWithConstMap;
+  SmallVector<Instruction *, 8> InstrsToErase;
+
+  // Map a block to its reverse postorder traversal number. It is used to
+  // find back edges easily.
+  DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
+
+  using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
+  using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
+  using UnavailBlkVect = SmallVector<BasicBlock *, 64>;
+
+  bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
+               const TargetLibraryInfo &RunTLI, AAResults &RunAA,
+               MemoryDependenceResults *RunMD, LoopInfo *LI,
+               OptimizationRemarkEmitter *ORE);
+
+  /// Push a new Value to the LeaderTable onto the list for its value number.
+  void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
+    LeaderTableEntry &Curr = LeaderTable[N];
+    if (!Curr.Val) {
+      Curr.Val = V;
+      Curr.BB = BB;
+      return;
+    }
+
+    LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
+    Node->Val = V;
+    Node->BB = BB;
+    Node->Next = Curr.Next;
+    Curr.Next = Node;
+  }
+
+  /// Scan the list of values corresponding to a given
+  /// value number, and remove the given instruction if encountered.
+  void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
+    LeaderTableEntry *Prev = nullptr;
+    LeaderTableEntry *Curr = &LeaderTable[N];
+
+    while (Curr && (Curr->Val != I || Curr->BB != BB)) {
+      Prev = Curr;
+      Curr = Curr->Next;
+    }
+
+    if (!Curr)
+      return;
+
+    if (Prev) {
+      Prev->Next = Curr->Next;
+    } else {
+      if (!Curr->Next) {
+        Curr->Val = nullptr;
+        Curr->BB = nullptr;
+      } else {
+        LeaderTableEntry *Next = Curr->Next;
+        Curr->Val = Next->Val;
+        Curr->BB = Next->BB;
+        Curr->Next = Next->Next;
+      }
+    }
+  }
+
+  // List of critical edges to be split between iterations.
+  SmallVector<std::pair<TerminatorInst *, unsigned>, 4> toSplit;
+
+  // Helper functions of redundant load elimination
+  bool processLoad(LoadInst *L);
+  bool processNonLocalLoad(LoadInst *L);
+  bool processAssumeIntrinsic(IntrinsicInst *II);
+
+  /// Given a local dependency (Def or Clobber) determine if a value is
+  /// available for the load.  Returns true if a value is known to be
+  /// available and populates Res.  Returns false otherwise.
+  bool AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
+                               Value *Address, gvn::AvailableValue &Res);
+
+  /// Given a list of non-local dependencies, determine if a value is
+  /// available for the load in each specified block.  If it is, add it to
+  /// ValuesPerBlock.  If not, add it to UnavailableBlocks.
+  void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
+                               AvailValInBlkVect &ValuesPerBlock,
+                               UnavailBlkVect &UnavailableBlocks);
+
+  bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
+                      UnavailBlkVect &UnavailableBlocks);
+
+  // Other helper routines
+  bool processInstruction(Instruction *I);
+  bool processBlock(BasicBlock *BB);
+  void dump(DenseMap<uint32_t, Value *> &d) const;
+  bool iterateOnFunction(Function &F);
+  bool performPRE(Function &F);
+  bool performScalarPRE(Instruction *I);
+  bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
+                                 BasicBlock *Curr, unsigned int ValNo);
+  Value *findLeader(const BasicBlock *BB, uint32_t num);
+  void cleanupGlobalSets();
+  void fillImplicitControlFlowInfo(BasicBlock *BB);
+  void verifyRemoved(const Instruction *I) const;
+  bool splitCriticalEdges();
+  BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
+  bool replaceOperandsWithConsts(Instruction *I) const;
+  bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
+                         bool DominatesByEdge);
+  bool processFoldableCondBr(BranchInst *BI);
+  void addDeadBlock(BasicBlock *BB);
+  void assignValNumForDeadCode();
+  void assignBlockRPONumber(Function &F);
+};
+
+/// Create a legacy GVN pass. This also allows parameterizing whether or not
+/// loads are eliminated by the pass.
+FunctionPass *createGVNPass(bool NoLoads = false);
+
+/// \brief A simple and fast domtree-based GVN pass to hoist common expressions
+/// from sibling branches.
+struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Uses an "inverted" value numbering to decide the similarity of
+/// expressions and sinks similar expressions into successors.
+struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_GVN_H
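+
+// Usage sketch (illustrative comment, not part of the upstream header): GVN is
+// available both ways. With the new pass manager:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(GVN());
+//   FPM.run(F, FAM);   // FAM: a registered FunctionAnalysisManager
+//
+// and with the legacy pass manager, optionally disabling load elimination:
+//
+//   legacy::FunctionPassManager LPM(&M);
+//   LPM.add(createGVNPass(/*NoLoads=*/true));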
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
new file mode 100644
index 0000000..99dae15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -0,0 +1,661 @@
+//===- GVNExpression.h - GVN Expression classes -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// The header file for the GVN pass that contains expression handling
+/// classes
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+#define LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+class BasicBlock;
+class Type;
+
+namespace GVNExpression {
+
+enum ExpressionType {
+  ET_Base,
+  ET_Constant,
+  ET_Variable,
+  ET_Dead,
+  ET_Unknown,
+  ET_BasicStart,
+  ET_Basic,
+  ET_AggregateValue,
+  ET_Phi,
+  ET_MemoryStart,
+  ET_Call,
+  ET_Load,
+  ET_Store,
+  ET_MemoryEnd,
+  ET_BasicEnd
+};
+
+class Expression {
+private:
+  ExpressionType EType;
+  unsigned Opcode;
+  mutable hash_code HashVal = 0;
+
+public:
+  Expression(ExpressionType ET = ET_Base, unsigned O = ~2U)
+      : EType(ET), Opcode(O) {}
+  Expression(const Expression &) = delete;
+  Expression &operator=(const Expression &) = delete;
+  virtual ~Expression();
+
+  static unsigned getEmptyKey() { return ~0U; }
+  static unsigned getTombstoneKey() { return ~1U; }
+
+  bool operator!=(const Expression &Other) const { return !(*this == Other); }
+  bool operator==(const Expression &Other) const {
+    if (getOpcode() != Other.getOpcode())
+      return false;
+    if (getOpcode() == getEmptyKey() || getOpcode() == getTombstoneKey())
+      return true;
+    // Compare the expression type for anything but load and store.
+    // For load and store we set the opcode to zero to make them equal.
+    if (getExpressionType() != ET_Load && getExpressionType() != ET_Store &&
+        getExpressionType() != Other.getExpressionType())
+      return false;
+
+    return equals(Other);
+  }
+
+  hash_code getComputedHash() const {
+    // It's theoretically possible for a thing to hash to zero.  In that case,
+    // we will just compute the hash a few extra times, which is no worse than
+    // what we did before, which was to compute it every time.
+    if (static_cast<unsigned>(HashVal) == 0)
+      HashVal = getHashValue();
+    return HashVal;
+  }
+
+  virtual bool equals(const Expression &Other) const { return true; }
+
+  // Return true if the two expressions are exactly the same, including the
+  // normally ignored fields.
+  virtual bool exactlyEquals(const Expression &Other) const {
+    return getExpressionType() == Other.getExpressionType() && equals(Other);
+  }
+
+  unsigned getOpcode() const { return Opcode; }
+  void setOpcode(unsigned opcode) { Opcode = opcode; }
+  ExpressionType getExpressionType() const { return EType; }
+
+  // We deliberately leave the expression type out of the hash value.
+  virtual hash_code getHashValue() const { return getOpcode(); }
+
+  // Debugging support
+  virtual void printInternal(raw_ostream &OS, bool PrintEType) const {
+    if (PrintEType)
+      OS << "etype = " << getExpressionType() << ",";
+    OS << "opcode = " << getOpcode() << ", ";
+  }
+
+  void print(raw_ostream &OS) const {
+    OS << "{ ";
+    printInternal(OS, true);
+    OS << "}";
+  }
+
+  LLVM_DUMP_METHOD void dump() const;
+};
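+
+// Editor's note (assumption, not in the original header): the sentinel
+// opcodes from getEmptyKey()/getTombstoneKey() let Expression objects serve
+// as DenseMap keys. A DenseMapInfo specialization can hand out sentinel
+// expressions whose opcode is ~0U or ~1U; operator== above returns early for
+// exactly those opcodes, so sentinels never reach the virtual equals().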
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Expression &E) {
+  E.print(OS);
+  return OS;
+}
+
+class BasicExpression : public Expression {
+private:
+  using RecyclerType = ArrayRecycler<Value *>;
+  using RecyclerCapacity = RecyclerType::Capacity;
+
+  Value **Operands = nullptr;
+  unsigned MaxOperands;
+  unsigned NumOperands = 0;
+  Type *ValueType = nullptr;
+
+public:
+  BasicExpression(unsigned NumOperands)
+      : BasicExpression(NumOperands, ET_Basic) {}
+  BasicExpression(unsigned NumOperands, ExpressionType ET)
+      : Expression(ET), MaxOperands(NumOperands) {}
+  BasicExpression() = delete;
+  BasicExpression(const BasicExpression &) = delete;
+  BasicExpression &operator=(const BasicExpression &) = delete;
+  ~BasicExpression() override;
+
+  static bool classof(const Expression *EB) {
+    ExpressionType ET = EB->getExpressionType();
+    return ET > ET_BasicStart && ET < ET_BasicEnd;
+  }
+
+  /// \brief Swap two operands. Used during GVN to put commutative operands in
+  /// order.
+  void swapOperands(unsigned First, unsigned Second) {
+    std::swap(Operands[First], Operands[Second]);
+  }
+
+  Value *getOperand(unsigned N) const {
+    assert(Operands && "Operands not allocated");
+    assert(N < NumOperands && "Operand out of range");
+    return Operands[N];
+  }
+
+  void setOperand(unsigned N, Value *V) {
+    assert(Operands && "Operands not allocated before setting");
+    assert(N < NumOperands && "Operand out of range");
+    Operands[N] = V;
+  }
+
+  unsigned getNumOperands() const { return NumOperands; }
+
+  using op_iterator = Value **;
+  using const_op_iterator = Value *const *;
+
+  op_iterator op_begin() { return Operands; }
+  op_iterator op_end() { return Operands + NumOperands; }
+  const_op_iterator op_begin() const { return Operands; }
+  const_op_iterator op_end() const { return Operands + NumOperands; }
+  iterator_range<op_iterator> operands() {
+    return iterator_range<op_iterator>(op_begin(), op_end());
+  }
+  iterator_range<const_op_iterator> operands() const {
+    return iterator_range<const_op_iterator>(op_begin(), op_end());
+  }
+
+  void op_push_back(Value *Arg) {
+    assert(NumOperands < MaxOperands && "Tried to add too many operands");
+    assert(Operands && "Operandss not allocated before pushing");
+    Operands[NumOperands++] = Arg;
+  }
+  bool op_empty() const { return getNumOperands() == 0; }
+
+  void allocateOperands(RecyclerType &Recycler, BumpPtrAllocator &Allocator) {
+    assert(!Operands && "Operands already allocated");
+    Operands = Recycler.allocate(RecyclerCapacity::get(MaxOperands), Allocator);
+  }
+  void deallocateOperands(RecyclerType &Recycler) {
+    Recycler.deallocate(RecyclerCapacity::get(MaxOperands), Operands);
+  }
+
+  void setType(Type *T) { ValueType = T; }
+  Type *getType() const { return ValueType; }
+
+  bool equals(const Expression &Other) const override {
+    if (getOpcode() != Other.getOpcode())
+      return false;
+
+    const auto &OE = cast<BasicExpression>(Other);
+    return getType() == OE.getType() && NumOperands == OE.NumOperands &&
+           std::equal(op_begin(), op_end(), OE.op_begin());
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(), ValueType,
+                        hash_combine_range(op_begin(), op_end()));
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeBasic, ";
+
+    this->Expression::printInternal(OS, false);
+    OS << "operands = {";
+    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+      OS << "[" << i << "] = ";
+      Operands[i]->printAsOperand(OS);
+      OS << "  ";
+    }
+    OS << "} ";
+  }
+};
+
+class op_inserter
+    : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+  using Container = BasicExpression;
+
+  Container *BE;
+
+public:
+  explicit op_inserter(BasicExpression &E) : BE(&E) {}
+  explicit op_inserter(BasicExpression *E) : BE(E) {}
+
+  op_inserter &operator=(Value *val) {
+    BE->op_push_back(val);
+    return *this;
+  }
+  op_inserter &operator*() { return *this; }
+  op_inserter &operator++() { return *this; }
+  op_inserter &operator++(int) { return *this; }
+};
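+
+// Illustrative usage sketch (editor's addition; the variables I, Recycler,
+// and Allocator are hypothetical). Because op_inserter models a standard
+// output iterator, an algorithm such as std::copy can populate an
+// expression's operand array once it has been allocated:
+//
+//   BasicExpression *E = new BasicExpression(I->getNumOperands());
+//   E->allocateOperands(Recycler, Allocator);
+//   std::copy(I->op_begin(), I->op_end(), op_inserter(E));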
+
+class MemoryExpression : public BasicExpression {
+private:
+  const MemoryAccess *MemoryLeader;
+
+public:
+  MemoryExpression(unsigned NumOperands, enum ExpressionType EType,
+                   const MemoryAccess *MemoryLeader)
+      : BasicExpression(NumOperands, EType), MemoryLeader(MemoryLeader) {}
+  MemoryExpression() = delete;
+  MemoryExpression(const MemoryExpression &) = delete;
+  MemoryExpression &operator=(const MemoryExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() > ET_MemoryStart &&
+           EB->getExpressionType() < ET_MemoryEnd;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(), MemoryLeader);
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const MemoryExpression &OtherMCE = cast<MemoryExpression>(Other);
+
+    return MemoryLeader == OtherMCE.MemoryLeader;
+  }
+
+  const MemoryAccess *getMemoryLeader() const { return MemoryLeader; }
+  void setMemoryLeader(const MemoryAccess *ML) { MemoryLeader = ML; }
+};
+
+class CallExpression final : public MemoryExpression {
+private:
+  CallInst *Call;
+
+public:
+  CallExpression(unsigned NumOperands, CallInst *C,
+                 const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, ET_Call, MemoryLeader), Call(C) {}
+  CallExpression() = delete;
+  CallExpression(const CallExpression &) = delete;
+  CallExpression &operator=(const CallExpression &) = delete;
+  ~CallExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Call;
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeCall, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents call at ";
+    Call->printAsOperand(OS);
+  }
+};
+
+class LoadExpression final : public MemoryExpression {
+private:
+  LoadInst *Load;
+  unsigned Alignment;
+
+public:
+  LoadExpression(unsigned NumOperands, LoadInst *L,
+                 const MemoryAccess *MemoryLeader)
+      : LoadExpression(ET_Load, NumOperands, L, MemoryLeader) {}
+
+  LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
+                 const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {
+    Alignment = L ? L->getAlignment() : 0;
+  }
+
+  LoadExpression() = delete;
+  LoadExpression(const LoadExpression &) = delete;
+  LoadExpression &operator=(const LoadExpression &) = delete;
+  ~LoadExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Load;
+  }
+
+  LoadInst *getLoadInst() const { return Load; }
+  void setLoadInst(LoadInst *L) { Load = L; }
+
+  unsigned getAlignment() const { return Alignment; }
+  void setAlignment(unsigned Align) { Alignment = Align; }
+
+  bool equals(const Expression &Other) const override;
+  bool exactlyEquals(const Expression &Other) const override {
+    return Expression::exactlyEquals(Other) &&
+           cast<LoadExpression>(Other).getLoadInst() == getLoadInst();
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeLoad, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents Load at ";
+    Load->printAsOperand(OS);
+    OS << " with MemoryLeader " << *getMemoryLeader();
+  }
+};
+
+class StoreExpression final : public MemoryExpression {
+private:
+  StoreInst *Store;
+  Value *StoredValue;
+
+public:
+  StoreExpression(unsigned NumOperands, StoreInst *S, Value *StoredValue,
+                  const MemoryAccess *MemoryLeader)
+      : MemoryExpression(NumOperands, ET_Store, MemoryLeader), Store(S),
+        StoredValue(StoredValue) {}
+  StoreExpression() = delete;
+  StoreExpression(const StoreExpression &) = delete;
+  StoreExpression &operator=(const StoreExpression &) = delete;
+  ~StoreExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Store;
+  }
+
+  StoreInst *getStoreInst() const { return Store; }
+  Value *getStoredValue() const { return StoredValue; }
+
+  bool equals(const Expression &Other) const override;
+
+  bool exactlyEquals(const Expression &Other) const override {
+    return Expression::exactlyEquals(Other) &&
+           cast<StoreExpression>(Other).getStoreInst() == getStoreInst();
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeStore, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << " represents Store  " << *Store;
+    OS << " with StoredValue ";
+    StoredValue->printAsOperand(OS);
+    OS << " and MemoryLeader " << *getMemoryLeader();
+  }
+};
+
+class AggregateValueExpression final : public BasicExpression {
+private:
+  unsigned MaxIntOperands;
+  unsigned NumIntOperands = 0;
+  unsigned *IntOperands = nullptr;
+
+public:
+  AggregateValueExpression(unsigned NumOperands, unsigned NumIntOperands)
+      : BasicExpression(NumOperands, ET_AggregateValue),
+        MaxIntOperands(NumIntOperands) {}
+  AggregateValueExpression() = delete;
+  AggregateValueExpression(const AggregateValueExpression &) = delete;
+  AggregateValueExpression &
+  operator=(const AggregateValueExpression &) = delete;
+  ~AggregateValueExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_AggregateValue;
+  }
+
+  using int_arg_iterator = unsigned *;
+  using const_int_arg_iterator = const unsigned *;
+
+  int_arg_iterator int_op_begin() { return IntOperands; }
+  int_arg_iterator int_op_end() { return IntOperands + NumIntOperands; }
+  const_int_arg_iterator int_op_begin() const { return IntOperands; }
+  const_int_arg_iterator int_op_end() const {
+    return IntOperands + NumIntOperands;
+  }
+  unsigned int_op_size() const { return NumIntOperands; }
+  bool int_op_empty() const { return NumIntOperands == 0; }
+  void int_op_push_back(unsigned IntOperand) {
+    assert(NumIntOperands < MaxIntOperands &&
+           "Tried to add too many int operands");
+    assert(IntOperands && "Operands not allocated before pushing");
+    IntOperands[NumIntOperands++] = IntOperand;
+  }
+
+  virtual void allocateIntOperands(BumpPtrAllocator &Allocator) {
+    assert(!IntOperands && "Operands already allocated");
+    IntOperands = Allocator.Allocate<unsigned>(MaxIntOperands);
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const AggregateValueExpression &OE = cast<AggregateValueExpression>(Other);
+    return NumIntOperands == OE.NumIntOperands &&
+           std::equal(int_op_begin(), int_op_end(), OE.int_op_begin());
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(),
+                        hash_combine_range(int_op_begin(), int_op_end()));
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeAggregateValue, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << ", intoperands = {";
+    for (unsigned i = 0, e = int_op_size(); i != e; ++i) {
+      OS << "[" << i << "] = " << IntOperands[i] << "  ";
+    }
+    OS << "}";
+  }
+};
+
+class int_op_inserter
+    : public std::iterator<std::output_iterator_tag, void, void, void, void> {
+private:
+  using Container = AggregateValueExpression;
+
+  Container *AVE;
+
+public:
+  explicit int_op_inserter(AggregateValueExpression &E) : AVE(&E) {}
+  explicit int_op_inserter(AggregateValueExpression *E) : AVE(E) {}
+
+  int_op_inserter &operator=(unsigned int val) {
+    AVE->int_op_push_back(val);
+    return *this;
+  }
+  int_op_inserter &operator*() { return *this; }
+  int_op_inserter &operator++() { return *this; }
+  int_op_inserter &operator++(int) { return *this; }
+};
+
+class PHIExpression final : public BasicExpression {
+private:
+  BasicBlock *BB;
+
+public:
+  PHIExpression(unsigned NumOperands, BasicBlock *B)
+      : BasicExpression(NumOperands, ET_Phi), BB(B) {}
+  PHIExpression() = delete;
+  PHIExpression(const PHIExpression &) = delete;
+  PHIExpression &operator=(const PHIExpression &) = delete;
+  ~PHIExpression() override;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Phi;
+  }
+
+  bool equals(const Expression &Other) const override {
+    if (!this->BasicExpression::equals(Other))
+      return false;
+    const PHIExpression &OE = cast<PHIExpression>(Other);
+    return BB == OE.BB;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->BasicExpression::getHashValue(), BB);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypePhi, ";
+    this->BasicExpression::printInternal(OS, false);
+    OS << "bb = " << BB;
+  }
+};
+
+class DeadExpression final : public Expression {
+public:
+  DeadExpression() : Expression(ET_Dead) {}
+  DeadExpression(const DeadExpression &) = delete;
+  DeadExpression &operator=(const DeadExpression &) = delete;
+
+  static bool classof(const Expression *E) {
+    return E->getExpressionType() == ET_Dead;
+  }
+};
+
+class VariableExpression final : public Expression {
+private:
+  Value *VariableValue;
+
+public:
+  VariableExpression(Value *V) : Expression(ET_Variable), VariableValue(V) {}
+  VariableExpression() = delete;
+  VariableExpression(const VariableExpression &) = delete;
+  VariableExpression &operator=(const VariableExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Variable;
+  }
+
+  Value *getVariableValue() const { return VariableValue; }
+  void setVariableValue(Value *V) { VariableValue = V; }
+
+  bool equals(const Expression &Other) const override {
+    const VariableExpression &OC = cast<VariableExpression>(Other);
+    return VariableValue == OC.VariableValue;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(),
+                        VariableValue->getType(), VariableValue);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeVariable, ";
+    this->Expression::printInternal(OS, false);
+    OS << " variable = " << *VariableValue;
+  }
+};
+
+class ConstantExpression final : public Expression {
+private:
+  Constant *ConstantValue = nullptr;
+
+public:
+  ConstantExpression() : Expression(ET_Constant) {}
+  ConstantExpression(Constant *constantValue)
+      : Expression(ET_Constant), ConstantValue(constantValue) {}
+  ConstantExpression(const ConstantExpression &) = delete;
+  ConstantExpression &operator=(const ConstantExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Constant;
+  }
+
+  Constant *getConstantValue() const { return ConstantValue; }
+  void setConstantValue(Constant *V) { ConstantValue = V; }
+
+  bool equals(const Expression &Other) const override {
+    const ConstantExpression &OC = cast<ConstantExpression>(Other);
+    return ConstantValue == OC.ConstantValue;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(),
+                        ConstantValue->getType(), ConstantValue);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeConstant, ";
+    this->Expression::printInternal(OS, false);
+    OS << " constant = " << *ConstantValue;
+  }
+};
+
+class UnknownExpression final : public Expression {
+private:
+  Instruction *Inst;
+
+public:
+  UnknownExpression(Instruction *I) : Expression(ET_Unknown), Inst(I) {}
+  UnknownExpression() = delete;
+  UnknownExpression(const UnknownExpression &) = delete;
+  UnknownExpression &operator=(const UnknownExpression &) = delete;
+
+  static bool classof(const Expression *EB) {
+    return EB->getExpressionType() == ET_Unknown;
+  }
+
+  Instruction *getInstruction() const { return Inst; }
+  void setInstruction(Instruction *I) { Inst = I; }
+
+  bool equals(const Expression &Other) const override {
+    const auto &OU = cast<UnknownExpression>(Other);
+    return Inst == OU.Inst;
+  }
+
+  hash_code getHashValue() const override {
+    return hash_combine(this->Expression::getHashValue(), Inst);
+  }
+
+  // Debugging support
+  void printInternal(raw_ostream &OS, bool PrintEType) const override {
+    if (PrintEType)
+      OS << "ExpressionTypeUnknown, ";
+    this->Expression::printInternal(OS, false);
+    OS << " inst = " << *Inst;
+  }
+};
+
+} // end namespace GVNExpression
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h b/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h
new file mode 100644
index 0000000..2bc0940
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/GuardWidening.h
@@ -0,0 +1,32 @@
+//===- GuardWidening.h - Guard Widening Pass --------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Guard widening is an optimization over the @llvm.experimental.guard intrinsic
+// that (optimistically) combines multiple guards into one to have fewer checks
+// at runtime.
+//
+//===----------------------------------------------------------------------===//
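+//
+// For example (editor's sketch, not part of the original header), a dominated
+// guard can be folded into its dominating guard so only one check runs:
+//
+//   guard(c1);              guard(c1 && c2);
+//   ...              =>     ...
+//   guard(c2);              // second guard removed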
+
+
+#ifndef LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
+#define LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct GuardWideningPass : public PassInfoMixin<GuardWideningPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+
+#endif  // LLVM_TRANSFORMS_SCALAR_GUARD_WIDENING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h b/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h
new file mode 100644
index 0000000..fad00d8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/IVUsersPrinter.h
@@ -0,0 +1,30 @@
+//===- IVUsersPrinter.h - Induction Variable Users Printing -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
+#define LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
+
+#include "llvm/Analysis/IVUsers.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Printer pass for the \c IVUsers for a loop.
+class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
new file mode 100644
index 0000000..e321c8f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/IndVarSimplify.h
@@ -0,0 +1,34 @@
+//===- IndVarSimplify.h - Induction Variable Simplification -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Induction Variable
+// Simplification pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
+#define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
new file mode 100644
index 0000000..311c549
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/InductiveRangeCheckElimination.h
@@ -0,0 +1,31 @@
+//===- InductiveRangeCheckElimination.h - IRCE ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Inductive Range Check Elimination
+// loop pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class IRCEPass : public PassInfoMixin<IRCEPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
new file mode 100644
index 0000000..b3493a2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -0,0 +1,161 @@
+//===- JumpThreading.h - thread control through conditional BBs -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// See the comments on JumpThreadingPass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
+#define LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class BasicBlock;
+class BinaryOperator;
+class BranchInst;
+class CmpInst;
+class Constant;
+class DeferredDominance;
+class Function;
+class Instruction;
+class IntrinsicInst;
+class LazyValueInfo;
+class LoadInst;
+class PHINode;
+class TargetLibraryInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by
+/// JumpThreading.
+/// These are implementation details and should not be used by clients.
+namespace jumpthreading {
+
+// These are at global scope so static functions can use them too.
+using PredValueInfo = SmallVectorImpl<std::pair<Constant *, BasicBlock *>>;
+using PredValueInfoTy = SmallVector<std::pair<Constant *, BasicBlock *>, 8>;
+
+// This is used to keep track of what kind of constant we're currently hoping
+// to find.
+enum ConstantPreference { WantInteger, WantBlockAddress };
+
+} // end namespace jumpthreading
+
+/// This pass performs 'jump threading', which looks at blocks that have
+/// multiple predecessors and multiple successors.  If one or more of the
+/// predecessors of the block can be proven to always jump to one of the
+/// successors, we forward the edge from the predecessor to the successor by
+/// duplicating the contents of this block.
+///
+/// An example of when this can occur is code like this:
+///
+///   if () { ...
+///     X = 4;
+///   }
+///   if (X < 3) {
+///
+/// In this case, the unconditional branch at the end of the first if can be
+/// revectored to the false side of the second if.
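+///
+/// Editor's sketch (assumption, not in the original header): the predecessor
+/// that sets X = 4 is rewritten to branch straight to the false successor of
+/// the second if, since X < 3 is known false along that edge:
+///
+///   if () { ...
+///     X = 4;
+///     goto SecondIfFalse;   // threaded edge; the test block is duplicated
+///   }
+///   if (X < 3) { ... } SecondIfFalse: ...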
+class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
+  TargetLibraryInfo *TLI;
+  LazyValueInfo *LVI;
+  AliasAnalysis *AA;
+  DeferredDominance *DDT;
+  std::unique_ptr<BlockFrequencyInfo> BFI;
+  std::unique_ptr<BranchProbabilityInfo> BPI;
+  bool HasProfileData = false;
+  bool HasGuards = false;
+#ifdef NDEBUG
+  SmallPtrSet<const BasicBlock *, 16> LoopHeaders;
+#else
+  SmallSet<AssertingVH<const BasicBlock>, 16> LoopHeaders;
+#endif
+  DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
+
+  unsigned BBDupThreshold;
+
+  // RAII helper for updating the recursion stack.
+  struct RecursionSetRemover {
+    DenseSet<std::pair<Value *, BasicBlock *>> &TheSet;
+    std::pair<Value *, BasicBlock *> ThePair;
+
+    RecursionSetRemover(DenseSet<std::pair<Value *, BasicBlock *>> &S,
+                        std::pair<Value *, BasicBlock *> P)
+        : TheSet(S), ThePair(P) {}
+
+    ~RecursionSetRemover() { TheSet.erase(ThePair); }
+  };
+
+public:
+  JumpThreadingPass(int T = -1);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, TargetLibraryInfo *TLI_, LazyValueInfo *LVI_,
+               AliasAnalysis *AA_, DeferredDominance *DDT_,
+               bool HasProfileData_, std::unique_ptr<BlockFrequencyInfo> BFI_,
+               std::unique_ptr<BranchProbabilityInfo> BPI_);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  void releaseMemory() {
+    BFI.reset();
+    BPI.reset();
+  }
+
+  void FindLoopHeaders(Function &F);
+  bool ProcessBlock(BasicBlock *BB);
+  bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
+                  BasicBlock *SuccBB);
+  bool DuplicateCondBranchOnPHIIntoPred(
+      BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs);
+
+  bool
+  ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
+                                  jumpthreading::PredValueInfo &Result,
+                                  jumpthreading::ConstantPreference Preference,
+                                  Instruction *CxtI = nullptr);
+  bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
+                              jumpthreading::ConstantPreference Preference,
+                              Instruction *CxtI = nullptr);
+
+  bool ProcessBranchOnPHI(PHINode *PN);
+  bool ProcessBranchOnXOR(BinaryOperator *BO);
+  bool ProcessImpliedCondition(BasicBlock *BB);
+
+  bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
+  bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
+  bool TryToUnfoldSelectInCurrBB(BasicBlock *BB);
+
+  bool ProcessGuards(BasicBlock *BB);
+  bool ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard, BranchInst *BI);
+
+private:
+  BasicBlock *SplitBlockPreds(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+                              const char *Suffix);
+  void UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
+                                    BasicBlock *NewBB, BasicBlock *SuccBB);
+  /// Check if the block has profile metadata for its outgoing edges.
+  bool doesBlockHaveProfileData(BasicBlock *BB);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
new file mode 100644
index 0000000..68ad190
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LICM.h
@@ -0,0 +1,50 @@
+//===- LICM.h - Loop Invariant Code Motion Pass -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs loop invariant code motion, attempting to remove as much
+// code from the body of a loop as possible.  It does this by either hoisting
+// code into the preheader block, or by sinking code to the exit blocks if it is
+// safe.  This pass also promotes must-aliased memory locations in the loop to
+// live in registers, thus hoisting and sinking "invariant" loads and stores.
+//
+// This pass uses alias analysis for two purposes:
+//
+//  1. Moving loop invariant loads and calls out of loops.  If we can determine
+//     that a load or call inside of a loop never aliases anything stored to,
+//     we can hoist it or sink it like any other instruction.
+//  2. Scalar Promotion of Memory - If there is a store instruction inside of
+//     the loop, we try to move the store to happen AFTER the loop instead of
+//     inside of the loop.  This can only happen if a few conditions are true:
+//       A. The pointer stored through is loop invariant
+//       B. There are no stores or loads in the loop which _may_ alias the
+//          pointer.  There are no calls in the loop which mod/ref the pointer.
+//     If these conditions are true, we can promote the loads and stores in the
+//     loop of the pointer to use a temporary alloca'd variable.  We then use
+//     the SSAUpdater to construct the appropriate SSA form for the value.
+//
+//===----------------------------------------------------------------------===//
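+//
+// Editor's sketch of scalar promotion (assumption, not from the original
+// header): when P is loop invariant and nothing in the loop may alias it,
+// the loop's memory traffic can be carried in a register instead:
+//
+//   for (...)                      T = *P;
+//     *P = *P + f(i);       =>     for (...)
+//                                    T = T + f(i);
+//                                  *P = T;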
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LICM_H
+#define LLVM_TRANSFORMS_SCALAR_LICM_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs Loop Invariant Code Motion Pass.
+class LICMPass : public PassInfoMixin<LICMPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LICM_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h
new file mode 100644
index 0000000..5eddd5f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h
@@ -0,0 +1,31 @@
+//===- llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// \brief Printer pass for the \c LoopAccessInfo results.
+class LoopAccessInfoPrinterPass
+    : public PassInfoMixin<LoopAccessInfoPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
new file mode 100644
index 0000000..12c7a03
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDataPrefetch.h
@@ -0,0 +1,33 @@
+//===-------- LoopDataPrefetch.h - Loop Data Prefetching Pass ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Loop Data Prefetching Pass.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// An optimization pass inserting data prefetches in loops.
+class LoopDataPrefetchPass : public PassInfoMixin<LoopDataPrefetchPass> {
+public:
+  LoopDataPrefetchPass() = default;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h
new file mode 100644
index 0000000..7b8cb1e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDeletion.h
@@ -0,0 +1,35 @@
+//===- LoopDeletion.h - Loop Deletion ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Deletion Pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
+public:
+  LoopDeletionPass() = default;
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h
new file mode 100644
index 0000000..2bf1c9d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopDistribute.h
@@ -0,0 +1,33 @@
+//===- LoopDistribute.h - Loop Distribution Pass ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Loop Distribution Pass.  Its main focus is to
+// distribute loops that cannot be vectorized due to dependence cycles.  It
+// tries to isolate the offending dependences into a new loop allowing
+// vectorization of the remaining parts.
+//
+//===----------------------------------------------------------------------===//
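+//
+// For example (editor's illustration), given
+//
+//   for (i = 0; i < n; i++) {
+//     A[i + 1] = A[i] * B[i];   // dependence cycle: not vectorizable
+//     D[i] = C[i] * E[i];       // independent: vectorizable
+//   }
+//
+// the pass can distribute the body into two loops so that the second loop
+// vectorizes even though the first cannot.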
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class LoopDistributePass : public PassInfoMixin<LoopDistributePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
new file mode 100644
index 0000000..7added8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h
@@ -0,0 +1,36 @@
+//===- LoopIdiomRecognize.h - Loop Idiom Recognize Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements an idiom recognizer that transforms simple loops into a
+// non-loop form.  In cases that this kicks in, it can be a significant
+// performance win.
+//
+//===----------------------------------------------------------------------===//
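+//
+// For instance (editor's illustration), a zero-initialization loop such as
+//
+//   for (i = 0; i < n; i++) p[i] = 0;
+//
+// can be collapsed into a single memset(p, 0, n) call.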
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+/// Performs Loop Idiom Recognize Pass.
+class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h
new file mode 100644
index 0000000..b0514a4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopLoadElimination.h
@@ -0,0 +1,34 @@
+//===- LoopLoadElimination.h ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This header defines the LoopLoadEliminationPass object. This pass forwards
+/// loaded values around loop backedges to allow their use in subsequent
+/// iterations.
+//
+//===----------------------------------------------------------------------===//
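+//
+// For example (editor's illustration), in
+//
+//   for (i = 0; i < n; i++)
+//     A[i + 1] = A[i] + B[i];
+//
+// the value stored to A[i + 1] in one iteration is exactly the value loaded
+// from A[i] in the next, so the load can be replaced by a value carried in a
+// register across the backedge.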
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// Pass to forward loads in a loop around the backedge to subsequent
+/// iterations.
+struct LoopLoadEliminationPass : public PassInfoMixin<LoopLoadEliminationPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
new file mode 100644
index 0000000..56a45ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPassManager.h
@@ -0,0 +1,406 @@
+//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing a pipeline of passes over loops
+/// in LLVM IR.
+///
+/// The primary loop pass pipeline is managed in a very particular way to
+/// provide a set of core guarantees:
+/// 1) Loops are, where possible, in simplified form.
+/// 2) Loops are *always* in LCSSA form.
+/// 3) A collection of Loop-specific analysis results are available:
+///    - LoopInfo
+///    - DominatorTree
+///    - ScalarEvolution
+///    - AAManager
+/// 4) All loop passes preserve #1 (where possible), #2, and #3.
+/// 5) Loop passes run over each loop in the loop nest from the innermost to
+///    the outermost. Specifically, all inner loops are processed before
+///    passes run over outer loops. When running the pipeline across an inner
+///    loop creates new inner loops, those are added and processed in this
+///    order as well.
+///
+/// This process is designed to facilitate transformations which simplify,
+/// reduce, and remove loops. For passes which are more oriented towards
+/// optimizing loops, especially optimizing loop *nests* instead of single
+/// loops in isolation, this framework is less interesting.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/LCSSA.h"
+#include "llvm/Transforms/Utils/LoopSimplify.h"
+
+namespace llvm {
+
+// Forward declarations of an update tracking API used in the pass manager.
+class LPMUpdater;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+            LPMUpdater &>::run(Loop &InitialL, LoopAnalysisManager &AM,
+                               LoopStandardAnalysisResults &AnalysisResults,
+                               LPMUpdater &U);
+extern template class PassManager<Loop, LoopAnalysisManager,
+                                  LoopStandardAnalysisResults &, LPMUpdater &>;
+
+/// \brief The Loop pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of Loop passes over each Loop that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
+                    LPMUpdater &>
+    LoopPassManager;
+
+/// A partial specialization of the require analysis template pass to forward
+/// the extra parameters from a transformation's run method to the
+/// AnalysisManager's getResult.
+template <typename AnalysisT>
+struct RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                           LoopStandardAnalysisResults &, LPMUpdater &>
+    : PassInfoMixin<
+          RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                              LoopStandardAnalysisResults &, LPMUpdater &>> {
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &) {
+    (void)AM.template getResult<AnalysisT>(L, AR);
+    return PreservedAnalyses::all();
+  }
+};
+
+/// An alias template to easily name a require analysis loop pass.
+template <typename AnalysisT>
+using RequireAnalysisLoopPass =
+    RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
+                        LoopStandardAnalysisResults &, LPMUpdater &>;
+
+namespace internal {
+/// Helper to implement appending of loops onto a worklist.
+///
+/// We want to process loops in postorder, but the worklist is a LIFO data
+/// structure, so we append to it in *reverse* postorder.
+///
+/// For trees, a preorder traversal is a viable reverse postorder, so we
+/// actually append using a preorder walk algorithm.
+template <typename RangeT>
+inline void appendLoopsToWorklist(RangeT &&Loops,
+                                  SmallPriorityWorklist<Loop *, 4> &Worklist) {
+  // We use an internal worklist to build up the preorder traversal without
+  // recursion.
+  SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
+
+  // We walk the initial sequence of loops in reverse because we generally want
+  // to visit defs before uses and the worklist is LIFO.
+  for (Loop *RootL : reverse(Loops)) {
+    assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
+    assert(PreOrderWorklist.empty() &&
+           "Must start with an empty preorder walk worklist.");
+    PreOrderWorklist.push_back(RootL);
+    do {
+      Loop *L = PreOrderWorklist.pop_back_val();
+      PreOrderWorklist.append(L->begin(), L->end());
+      PreOrderLoops.push_back(L);
+    } while (!PreOrderWorklist.empty());
+
+    Worklist.insert(std::move(PreOrderLoops));
+    PreOrderLoops.clear();
+  }
+}
+}
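+
+// Worked example (editor's note): for a nest with outer loop A containing
+// inner loops B and C (B before C), the preorder walk above inserts A, B, C.
+// Popping the LIFO worklist then yields C, B, A, so every loop is visited
+// only after the loops it contains; this is the postorder guarantee
+// documented in this header.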
+
+template <typename LoopPassT> class FunctionToLoopPassAdaptor;
+
+/// This class provides an interface for updating the loop pass manager based
+/// on mutations to the loop nest.
+///
+/// A reference to an instance of this class is passed as an argument to each
+/// Loop pass, and Loop passes should use it to update LPM infrastructure if
+/// they modify the loop nest structure.
+class LPMUpdater {
+public:
+  /// This can be queried by loop passes which run other loop passes (like pass
+  /// managers) to know whether the loop needs to be skipped due to updates to
+  /// the loop nest.
+  ///
+  /// If this returns true, the loop object may have been deleted, so passes
+  /// should take care not to touch the object.
+  bool skipCurrentLoop() const { return SkipCurrentLoop; }
+
+  /// Loop passes should use this method to indicate they have deleted a loop
+  /// from the nest.
+  ///
+  /// Note that this loop must either be the current loop or a subloop of the
+  /// current loop. This routine must be called prior to removing the loop from
+  /// the loop nest.
+  ///
+  /// If this is called for the current loop, in addition to clearing any
+  /// state, this routine will mark that the current loop should be skipped by
+  /// the rest of the pass management infrastructure.
+  void markLoopAsDeleted(Loop &L, llvm::StringRef Name) {
+    LAM.clear(L, Name);
+    assert((&L == CurrentL || CurrentL->contains(&L)) &&
+           "Cannot delete a loop outside of the "
+           "subloop tree currently being processed.");
+    if (&L == CurrentL)
+      SkipCurrentLoop = true;
+  }
+
+  /// Loop passes should use this method to indicate they have added new child
+  /// loops of the current loop.
+  ///
+  /// \p NewChildLoops must contain only the immediate children. Any nested
+  /// loops within them will be visited in postorder as usual for the loop pass
+  /// manager.
+  void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
+    // Insert ourselves back into the worklist first, as this loop should be
+    // revisited after all the children have been processed.
+    Worklist.insert(CurrentL);
+
+#ifndef NDEBUG
+    for (Loop *NewL : NewChildLoops)
+      assert(NewL->getParentLoop() == CurrentL && "All of the new loops must "
+                                                  "be immediate children of "
+                                                  "the current loop!");
+#endif
+
+    internal::appendLoopsToWorklist(NewChildLoops, Worklist);
+
+    // Also skip further processing of the current loop--it will be revisited
+    // after all of its newly added children are accounted for.
+    SkipCurrentLoop = true;
+  }
+
+  /// Loop passes should use this method to indicate they have added new
+  /// sibling loops to the current loop.
+  ///
+  /// \p NewSibLoops must only contain the immediate sibling loops. Any nested
+  /// loops within them will be visited in postorder as usual for the loop pass
+  /// manager.
+  void addSiblingLoops(ArrayRef<Loop *> NewSibLoops) {
+#ifndef NDEBUG
+    for (Loop *NewL : NewSibLoops)
+      assert(NewL->getParentLoop() == ParentL &&
+             "All of the new loops must be siblings of the current loop!");
+#endif
+
+    internal::appendLoopsToWorklist(NewSibLoops, Worklist);
+
+    // No need to skip the current loop or revisit it, as sibling loops
+    // shouldn't impact anything.
+  }
+
+  /// Restart the current loop.
+  ///
+  /// Loop passes should call this method to indicate the current loop has been
+  /// sufficiently changed that it should be re-visited from the beginning of
+  /// the loop pass pipeline rather than continuing.
+  void revisitCurrentLoop() {
+    // Tell the currently in-flight pipeline to stop running.
+    SkipCurrentLoop = true;
+
+    // And insert ourselves back into the worklist.
+    Worklist.insert(CurrentL);
+  }
+
+private:
+  template <typename LoopPassT> friend class llvm::FunctionToLoopPassAdaptor;
+
+  /// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
+  SmallPriorityWorklist<Loop *, 4> &Worklist;
+
+  /// The analysis manager for use in the current loop nest.
+  LoopAnalysisManager &LAM;
+
+  Loop *CurrentL;
+  bool SkipCurrentLoop;
+
+#ifndef NDEBUG
+  // In debug builds we also track the parent loop to implement asserts even in
+  // the face of loop deletion.
+  Loop *ParentL;
+#endif
+
+  LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
+             LoopAnalysisManager &LAM)
+      : Worklist(Worklist), LAM(LAM) {}
+};
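+
+// Editor's sketch of the deletion protocol (assumption; isDeadLoop is a
+// hypothetical predicate, and deleteDeadLoop is the utility declared in
+// Transforms/Utils/LoopUtils.h): a loop pass that erases its loop notifies
+// the updater before mutating the loop nest, per markLoopAsDeleted's
+// contract:
+//
+//   std::string Name = L.getName();
+//   if (isDeadLoop(L)) {
+//     Updater.markLoopAsDeleted(L, Name);
+//     deleteDeadLoop(&L, &AR.DT, &AR.SE, &AR.LI);
+//   }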
+
+/// \brief Adaptor that maps from a function to its loops.
+///
+/// Designed to allow composition of a LoopPass(Manager) and a
+/// FunctionPassManager. Note that if this pass is constructed with a \c
+/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
+/// analysis prior to running the loop passes over the function to enable a \c
+/// LoopAnalysisManager to be used within this run safely.
+template <typename LoopPassT>
+class FunctionToLoopPassAdaptor
+    : public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
+public:
+  explicit FunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false)
+      : Pass(std::move(Pass)), LoopCanonicalizationFPM(DebugLogging) {
+    LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
+    LoopCanonicalizationFPM.addPass(LCSSAPass());
+  }
+
+  /// \brief Runs the loop passes across every loop in the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
+    // Before we even compute any loop analyses, first run a miniature function
+    // pass pipeline to put loops into their canonical form. Note that we can
+    // directly build up function analyses after this as the function pass
+    // manager handles all the invalidation at that layer.
+    PreservedAnalyses PA = LoopCanonicalizationFPM.run(F, AM);
+
+    // Get the loop structure for this function
+    LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
+
+    // If there are no loops, there is nothing to do here.
+    if (LI.empty())
+      return PA;
+
+    // Get the analysis results needed by loop passes.
+    MemorySSA *MSSA = EnableMSSALoopDependency
+                          ? (&AM.getResult<MemorySSAAnalysis>(F).getMSSA())
+                          : nullptr;
+    LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
+                                       AM.getResult<AssumptionAnalysis>(F),
+                                       AM.getResult<DominatorTreeAnalysis>(F),
+                                       AM.getResult<LoopAnalysis>(F),
+                                       AM.getResult<ScalarEvolutionAnalysis>(F),
+                                       AM.getResult<TargetLibraryAnalysis>(F),
+                                       AM.getResult<TargetIRAnalysis>(F),
+                                       MSSA};
+
+    // Setup the loop analysis manager from its proxy. It is important that
+    // this is only done when there are loops to process and we have built the
+    // LoopStandardAnalysisResults object. The loop analyses cached in this
+    // manager have access to those analysis results and so it must invalidate
+    // itself when they go away.
+    LoopAnalysisManager &LAM =
+        AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
+
+    // A postorder worklist of loops to process.
+    SmallPriorityWorklist<Loop *, 4> Worklist;
+
+    // Register the worklist and loop analysis manager so that loop passes can
+    // update them when they mutate the loop nest structure.
+    LPMUpdater Updater(Worklist, LAM);
+
+    // Add the loop nests in the reverse order of LoopInfo. For some reason,
+    // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For
+    // the purpose of unrolling, loop deletion, and LICM, we largely want to
+    // work forward across the CFG so that we visit defs before uses and can
+    // propagate simplifications from one loop nest into the next.
+    // FIXME: Consider changing the order in LoopInfo.
+    internal::appendLoopsToWorklist(reverse(LI), Worklist);
+
+    do {
+      Loop *L = Worklist.pop_back_val();
+
+      // Reset the update structure for this loop.
+      Updater.CurrentL = L;
+      Updater.SkipCurrentLoop = false;
+
+#ifndef NDEBUG
+      // Save a parent loop pointer for asserts.
+      Updater.ParentL = L->getParentLoop();
+
+      // Verify the loop structure and LCSSA form before visiting the loop.
+      L->verifyLoop();
+      assert(L->isRecursivelyLCSSAForm(LAR.DT, LI) &&
+             "Loops must remain in LCSSA form!");
+#endif
+
+      PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
+      // FIXME: We should verify the set of analyses relevant to Loop passes
+      // are preserved.
+
+      // If the loop hasn't been deleted, we need to handle invalidation here.
+      if (!Updater.skipCurrentLoop())
+        // We know that the loop pass couldn't have invalidated any other
+        // loop's analyses (that's the contract of a loop pass), so directly
+        // handle the loop analysis manager's invalidation here.
+        LAM.invalidate(*L, PassPA);
+
+      // Then intersect the preserved set so that invalidation of module
+      // analyses will eventually occur when the module pass completes.
+      PA.intersect(std::move(PassPA));
+    } while (!Worklist.empty());
+
+    // By definition we preserve the proxy. We also preserve all analyses on
+    // Loops. This precludes *any* invalidation of loop analyses by the proxy,
+    // but that's OK because we've taken care to invalidate analyses in the
+    // loop analysis manager incrementally above.
+    PA.preserveSet<AllAnalysesOn<Loop>>();
+    PA.preserve<LoopAnalysisManagerFunctionProxy>();
+    // We also preserve the set of standard analyses.
+    PA.preserve<DominatorTreeAnalysis>();
+    PA.preserve<LoopAnalysis>();
+    PA.preserve<ScalarEvolutionAnalysis>();
+    // FIXME: Uncomment this when all loop passes preserve MemorySSA
+    // PA.preserve<MemorySSAAnalysis>();
+    // FIXME: What we really want to do here is preserve an AA category, but
+    // that concept doesn't exist yet.
+    PA.preserve<AAManager>();
+    PA.preserve<BasicAA>();
+    PA.preserve<GlobalsAA>();
+    PA.preserve<SCEVAA>();
+    return PA;
+  }
+
+private:
+  LoopPassT Pass;
+
+  FunctionPassManager LoopCanonicalizationFPM;
+};
+
+/// \brief A function to deduce a loop pass type and wrap it in the templated
+/// adaptor.
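+///
+/// A minimal usage sketch (assuming a configured FunctionPassManager FPM and
+/// an arbitrary loop pass such as LoopRotatePass):
+/// \code
+///   FPM.addPass(createFunctionToLoopPassAdaptor(LoopRotatePass()));
+/// \endcode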
+template <typename LoopPassT>
+FunctionToLoopPassAdaptor<LoopPassT>
+createFunctionToLoopPassAdaptor(LoopPassT Pass, bool DebugLogging = false) {
+  return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass), DebugLogging);
+}
+
+/// \brief Pass for printing a loop's contents as textual IR.
+class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
+  raw_ostream &OS;
+  std::string Banner;
+
+public:
+  PrintLoopPass();
+  PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
+                        LoopStandardAnalysisResults &, LPMUpdater &);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h
new file mode 100644
index 0000000..57398bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopPredication.h
@@ -0,0 +1,32 @@
+//===- LoopPredication.h - Guard based loop predication pass ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to convert loop variant range checks to loop invariant by
+// widening checks across loop iterations.
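+//
+// For example (a sketch), a per-iteration range check
+//
+//   for (i = 0; i < n; i++)
+//     guard(i < len);
+//
+// can be widened to a check over the whole iteration space, roughly
+// guard(n - 1 < len), making the test loop invariant.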
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs Loop Predication Pass.
+class LoopPredicationPass : public PassInfoMixin<LoopPredicationPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h
new file mode 100644
index 0000000..ea8d561
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopRotation.h
@@ -0,0 +1,35 @@
+//===- LoopRotation.h - Loop Rotation -------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Rotation pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// A simple loop rotation transformation.
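+///
+/// A sketch of the effect: a top-tested loop
+/// \code
+///   while (cond) { body; }
+/// \endcode
+/// is rotated into a guarded bottom-tested loop
+/// \code
+///   if (cond) { do { body; } while (cond); }
+/// \endcode
+/// duplicating the header test into the guard when header duplication is
+/// enabled.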
+class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
+public:
+  LoopRotatePass(bool EnableHeaderDuplication = true);
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+
+private:
+  const bool EnableHeaderDuplication;
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
new file mode 100644
index 0000000..7628c74
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h
@@ -0,0 +1,34 @@
+//===- LoopSimplifyCFG.h - Loop CFG Simplification Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Loop SimplifyCFG Pass. This pass is responsible for
+// basic loop CFG cleanup, primarily to assist other loop passes. If you
+// encounter a noncanonical CFG construct that causes another loop pass to
+// perform suboptimally, this is the place to fix it up.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// Performs basic CFG simplifications to assist other loop passes.
+class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h
new file mode 100644
index 0000000..371a7c8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopSink.h
@@ -0,0 +1,40 @@
+//===- LoopSink.h - Loop Sink Pass ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Loop Sink pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// A pass that does profile-guided sinking of instructions into loops.
+///
+/// This is a function pass as it shouldn't be composed into any kind of
+/// unified loop pass pipeline. Its goal is to sink code that is loop
+/// invariant but only required within the loop body, when doing so reduces
+/// the global expected dynamic frequency with which it executes.
+/// A classic example is an extremely cold branch within a loop body.
+///
+/// We do this as a separate pass so that during normal optimization all
+/// invariant operations can be held outside the loop body to simplify
+/// fundamental analyses and transforms of the loop.
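+///
+/// A sketch of the effect on IR shaped like the following, where %t was
+/// hoisted by LICM but is used only on a cold path:
+/// \code
+///   %t = <invariant computation>   ; executes on every entry to the loop
+///   loop:
+///     br i1 %cold, label %use, label %cont
+/// \endcode
+/// LoopSink moves %t into the %use block, so the hot path through the loop
+/// no longer pays for it.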
+class LoopSinkPass : public PassInfoMixin<LoopSinkPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
new file mode 100644
index 0000000..62c038a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopStrengthReduce.h
@@ -0,0 +1,42 @@
+//===- LoopStrengthReduce.h - Loop Strength Reduce Pass ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation analyzes and transforms the induction variables (and
+// computations derived from them) into forms suitable for efficient execution
+// on the target.
+//
+// This pass performs strength reduction on array references inside loops that
+// have the loop induction variable as one or more of their components. It
+// rewrites expressions to take advantage of scaled-index addressing modes
+// available on the target, and it performs a variety of other optimizations
+// related to loop induction variables.
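+//
+// For example (a sketch), an address computed from a multiply of the
+// induction variable
+//
+//   for (i = 0; i < n; i++)
+//     use(*(base + i * stride));
+//
+// can be rewritten to use a pointer that is simply incremented by stride each
+// iteration, replacing the multiply with an add.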
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Loop;
+class LPMUpdater;
+
+/// Performs Loop Strength Reduce Pass.
+class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
+public:
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
new file mode 100644
index 0000000..9848e0d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LoopUnrollPass.h
@@ -0,0 +1,49 @@
+//===- LoopUnrollPass.h -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class Loop;
+class LPMUpdater;
+
+/// Loop unroll pass that only does full loop unrolling.
+class LoopFullUnrollPass : public PassInfoMixin<LoopFullUnrollPass> {
+  const int OptLevel;
+
+public:
+  explicit LoopFullUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+/// Loop unroll pass that will support both full and partial unrolling.
+/// It is a function pass to have access to function and module analyses.
+/// It will also put loops into canonical form (simplified and LCSSA).
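+///
+/// A minimal usage sketch with the new pass manager (assuming a configured
+/// FunctionAnalysisManager FAM):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(LoopUnrollPass(/*OptLevel=*/3));
+///   FPM.run(F, FAM);
+/// \endcode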
+class LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
+  const int OptLevel;
+
+public:
+  /// This uses the target information (or flags) to control the thresholds for
+  /// different unrolling strategies but supports all of them.
+  explicit LoopUnrollPass(int OptLevel = 2) : OptLevel(OptLevel) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
new file mode 100644
index 0000000..a4a2e7a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerAtomic.h
@@ -0,0 +1,29 @@
+//===- LowerAtomic.h - Lower atomic intrinsics ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass lowers atomic intrinsics to non-atomic form for use in a known
+/// non-preemptible environment.
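+///
+/// For example (a sketch), an atomic read-modify-write
+/// \code
+///   %old = atomicrmw add i32* %p, i32 1 seq_cst
+/// \endcode
+/// is lowered to the equivalent non-atomic sequence
+/// \code
+///   %old = load i32, i32* %p
+///   %new = add i32 %old, 1
+///   store i32 %new, i32* %p
+/// \endcode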
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass that lowers atomic intrinsics into their non-atomic form.
+class LowerAtomicPass : public PassInfoMixin<LowerAtomicPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
new file mode 100644
index 0000000..ab9dec0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -0,0 +1,37 @@
+//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerExpectIntrinsic pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerExpectIntrinsicPass : PassInfoMixin<LowerExpectIntrinsicPass> {
+  /// \brief Run the pass over the function.
+  ///
+  /// This will lower all of the expect intrinsic calls in this function into
+  /// branch weight metadata. That metadata will subsequently feed the analysis
+  /// of the probabilities and frequencies of the CFG. After running this pass,
+  /// no more expect intrinsics remain, allowing the rest of the optimizer to
+  /// ignore them.
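+  ///
+  /// For example (a sketch),
+  /// \code
+  ///   %e = call i64 @llvm.expect.i64(i64 %cond, i64 1)
+  /// \endcode
+  /// is replaced by %cond itself, and the conditional branch fed by it gets
+  /// "branch_weights" metadata marking the %cond == 1 edge as the likely one.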
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
new file mode 100644
index 0000000..a9f19f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/LowerGuardIntrinsic.h
@@ -0,0 +1,28 @@
+//===--- LowerGuardIntrinsic.h - Lower the guard intrinsic ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the llvm.experimental.guard intrinsic to a conditional call
+// to @llvm.experimental.deoptimize.  Once this happens, the guard can no longer
+// be widened.
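+//
+// For example (a sketch), a guard
+//
+//   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
+//
+// is lowered to an explicit branch on %c whose false side calls
+// @llvm.experimental.deoptimize and returns.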
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct LowerGuardIntrinsicPass : PassInfoMixin<LowerGuardIntrinsicPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif //LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
new file mode 100644
index 0000000..046c808
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
@@ -0,0 +1,79 @@
+//===- MemCpyOptimizer.h - memcpy optimization ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs various transformations related to eliminating memcpy
+// calls, or transforming sets of stores into memsets.
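+//
+// For example (a sketch), a run of adjacent constant stores
+//
+//   p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
+//
+// can be merged into a single memset(p, 0, 4), and a memcpy reading from the
+// destination of an earlier memcpy can be forwarded to copy from the original
+// source instead.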
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
+#define LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/PassManager.h"
+#include <cstdint>
+#include <functional>
+
+namespace llvm {
+
+class AssumptionCache;
+class CallInst;
+class DominatorTree;
+class Function;
+class Instruction;
+class MemCpyInst;
+class MemMoveInst;
+class MemoryDependenceResults;
+class MemSetInst;
+class StoreInst;
+class TargetLibraryInfo;
+class Value;
+
+class MemCpyOptPass : public PassInfoMixin<MemCpyOptPass> {
+  MemoryDependenceResults *MD = nullptr;
+  TargetLibraryInfo *TLI = nullptr;
+  std::function<AliasAnalysis &()> LookupAliasAnalysis;
+  std::function<AssumptionCache &()> LookupAssumptionCache;
+  std::function<DominatorTree &()> LookupDomTree;
+
+public:
+  MemCpyOptPass() = default;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for the old PM.
+  bool runImpl(Function &F, MemoryDependenceResults *MD_,
+               TargetLibraryInfo *TLI_,
+               std::function<AliasAnalysis &()> LookupAliasAnalysis_,
+               std::function<AssumptionCache &()> LookupAssumptionCache_,
+               std::function<DominatorTree &()> LookupDomTree_);
+
+private:
+  // Helper functions
+  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
+  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
+  bool processMemCpy(MemCpyInst *M);
+  bool processMemMove(MemMoveInst *M);
+  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
+                            uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
+  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
+  bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
+  bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
+  bool processByValArgument(CallSite CS, unsigned ArgNo);
+  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
+                                    Value *ByteVal);
+
+  bool iterateOnFunction(Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
new file mode 100644
index 0000000..3cad7bb
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/MergedLoadStoreMotion.h
@@ -0,0 +1,39 @@
+//===- MergedLoadStoreMotion.h - merge and hoist/sink load/stores ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//! \file
+//! \brief This pass performs merges of loads and stores on both sides of a
+//! diamond (hammock). It hoists the loads and sinks the stores.
+//
+// The algorithm iteratively hoists two loads to the same address out of a
+// diamond (hammock) and merges them into a single load in the header.
+// Similarly, it sinks and merges two stores to the tail block (footer). The
+// algorithm iterates over the instructions of one side of the diamond and
+// attempts to find a matching load/store on the other side. It hoists / sinks
+// when it thinks it is safe to do so. This optimization helps with, e.g.,
+// hiding load latencies, triggering if-conversion, and reducing static code
+// size.
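+//
+// For example (a sketch), stores on both sides of a diamond
+//
+//   if (c) { *p = a; } else { *p = b; }
+//
+// are sunk into the footer and merged into a single store of a PHI of the two
+// values.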
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
+#define LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class MergedLoadStoreMotionPass
+    : public PassInfoMixin<MergedLoadStoreMotionPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
new file mode 100644
index 0000000..e835bd5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -0,0 +1,189 @@
+//===- NaryReassociate.h - Reassociate n-ary expressions --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates n-ary add expressions and eliminates the redundancy
+// exposed by the reassociation.
+//
+// A motivating example:
+//
+//   void foo(int a, int b) {
+//     bar(a + b);
+//     bar((a + 2) + b);
+//   }
+//
+// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
+// the above code to
+//
+//   int t = a + b;
+//   bar(t);
+//   bar(t + 2);
+//
+// However, the Reassociate pass is unable to do that because it processes each
+// instruction individually and believes (a + 2) + b is the best form according
+// to its rank system.
+//
+// To address this limitation, NaryReassociate reassociates an expression in a
+// form that reuses existing instructions. As a result, NaryReassociate can
+// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
+// (a + b) is computed before.
+//
+// NaryReassociate works as follows. For every instruction in the form of (a +
+// b) + c, it checks whether a + c or b + c is already computed by a dominating
+// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
+// c) + a and removes the redundancy accordingly. To efficiently look up whether
+// an expression is computed before, we store each instruction seen and its SCEV
+// into an SCEV-to-instruction map.
+//
+// Although the algorithm pattern-matches only ternary additions, it
+// automatically handles many >3-ary expressions by walking through the function
+// in the depth-first order. For example, given
+//
+//   (a + c) + d
+//   ((a + b) + c) + d
+//
+// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
+// ((a + c) + b) + d into ((a + c) + d) + b.
+//
+// Finally, the above dominator-based algorithm may need to run for multiple
+// iterations before emitting optimal code. One source of this need is that we
+// only split an operand when it is used only once. The above algorithm can
+// eliminate an instruction and decrease the usage count of its operands. As a
+// result, an instruction that previously had multiple uses may become a
+// single-use instruction and thus eligible for split consideration. For
+// example,
+//
+//   ac = a + c
+//   ab = a + b
+//   abc = ab + c
+//   ab2 = ab + b
+//   ab2c = ab2 + c
+//
+// In the first iteration, we cannot reassociate abc to ac+b because ab is used
+// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
+// result, ab2 becomes dead and ab will be used only once in the second
+// iteration.
+//
+// Limitations and TODO items:
+//
+// 1) We only consider n-ary adds and muls for now. This should be extended
+// and generalized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+#define LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BinaryOperator;
+class DataLayout;
+class DominatorTree;
+class Function;
+class GetElementPtrInst;
+class Instruction;
+class ScalarEvolution;
+class SCEV;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class Type;
+class Value;
+
+class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, AssumptionCache *AC_, DominatorTree *DT_,
+               ScalarEvolution *SE_, TargetLibraryInfo *TLI_,
+               TargetTransformInfo *TTI_);
+
+private:
+  // Runs only one iteration of the dominator-based algorithm. See the header
+  // comments for why we need multiple iterations.
+  bool doOneIteration(Function &F);
+
+  // Reassociates I for better CSE.
+  Instruction *tryReassociate(Instruction *I);
+
+  // Reassociate GEP for better CSE.
+  Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
+
+  // Try splitting GEP at the I-th index and see whether either part can be
+  // CSE'ed. This is a helper function for tryReassociateGEP.
+  //
+  // \p IndexedType The element type indexed by GEP's I-th index. This is
+  //                equivalent to
+  //                  GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
+  //                                      ..., i-th index).
+  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+                                              unsigned I, Type *IndexedType);
+
+  // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
+  // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
+  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
+                                              unsigned I, Value *LHS,
+                                              Value *RHS, Type *IndexedType);
+
+  // Reassociate binary operators for better CSE.
+  Instruction *tryReassociateBinaryOp(BinaryOperator *I);
+
+  // A helper function for tryReassociateBinaryOp. LHS and RHS are explicitly
+  // passed.
+  Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
+                                      BinaryOperator *I);
+  // Rewrites I to (LHS op RHS) if LHS is computed already.
+  Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
+                                       BinaryOperator *I);
+
+  // Tries to match Op1 and Op2 by using V.
+  bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);
+
+  // Gets SCEV for (LHS op RHS).
+  const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
+                            const SCEV *RHS);
+
+  // Returns the closest dominator of \c Dominatee that computes
+  // \c CandidateExpr. Returns null if not found.
+  Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
+                                            Instruction *Dominatee);
+
+  // GetElementPtrInst implicitly sign-extends an index if the index is shorter
+  // than the pointer size. This function returns whether Index is shorter than
+  // GEP's pointer size, i.e., whether Index needs to be sign-extended in order
+  // to be an index of GEP.
+  bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);
+
+  AssumptionCache *AC;
+  const DataLayout *DL;
+  DominatorTree *DT;
+  ScalarEvolution *SE;
+  TargetLibraryInfo *TLI;
+  TargetTransformInfo *TTI;
+
+  // A lookup table quickly telling which instructions compute the given SCEV.
+  // Note that there can be multiple instructions at different locations
+  // computing to the same SCEV, so we map a SCEV to an instruction list.  For
+  // example,
+  //
+  //   if (p1)
+  //     foo(a + b);
+  //   if (p2)
+  //     bar(a + b);
+  DenseMap<const SCEV *, SmallVector<WeakTrackingVH, 2>> SeenExprs;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h b/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h
new file mode 100644
index 0000000..05db255
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/NewGVN.h
@@ -0,0 +1,33 @@
+//===- NewGVN.h - Global Value Numbering Pass -------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for LLVM's Global Value Numbering pass.
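+///
+/// For example (a sketch), two congruent instructions
+///   %x = add i32 %p, %q
+///   %y = add i32 %p, %q
+/// receive the same value number, and uses of %y are rewritten to use %x.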
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+#define LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class NewGVNPass : public PassInfoMixin<NewGVNPass> {
+public:
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_NEWGVN_H
+
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
new file mode 100644
index 0000000..7f73831
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/PartiallyInlineLibCalls.h
@@ -0,0 +1,30 @@
+//===--- PartiallyInlineLibCalls.h - Partially inline libcalls --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to partially inline the fast path of well-known library
+// functions, such as using square-root instructions for cases where sqrt()
+// does not need to set errno.
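+//
+// For example (a sketch), a call to sqrt(x) becomes roughly
+//
+//   %r = call double @llvm.sqrt.f64(double %x)   ; fast hardware path
+//   %nan = fcmp uno double %r, %r                ; NaN means errno may be set
+//   br i1 %nan, label %call.sqrt, label %done    ; slow path calls sqrt()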
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
+#define LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class PartiallyInlineLibCallsPass
+    : public PassInfoMixin<PartiallyInlineLibCallsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
new file mode 100644
index 0000000..9997dfa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Reassociate.h
@@ -0,0 +1,120 @@
+//===- Reassociate.h - Reassociate binary expressions -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reassociates commutative expressions in an order that is designed
+// to promote better constant propagation, GCSE, LICM, PRE, etc.
+//
+// For example: 4 + (x + 5) -> x + (4 + 5)
+//
+// In the implementation of this algorithm, constants are assigned rank = 0,
+// function arguments are rank = 1, and other values are assigned ranks
+// corresponding to the reverse post order traversal of current function
+// (starting at 2), which effectively gives values in deep loops higher rank
+// than values not in loops.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
+#define LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class APInt;
+class BasicBlock;
+class BinaryOperator;
+class Function;
+class Instruction;
+class Value;
+
+/// A private "module" namespace for types and utilities used by Reassociate.
+/// These are implementation details and should not be used by clients.
+namespace reassociate {
+
+struct ValueEntry {
+  unsigned Rank;
+  Value *Op;
+
+  ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {}
+};
+
+inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) {
+  return LHS.Rank > RHS.Rank; // Sort so that highest rank goes to start.
+}
+
+/// \brief Utility class representing a base and exponent pair which form one
+/// factor of some product.
+struct Factor {
+  Value *Base;
+  unsigned Power;
+
+  Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {}
+};
+
+class XorOpnd;
+
+} // end namespace reassociate
+
+/// Reassociate commutative expressions.
+class ReassociatePass : public PassInfoMixin<ReassociatePass> {
+  DenseMap<BasicBlock *, unsigned> RankMap;
+  DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
+  SetVector<AssertingVH<Instruction>> RedoInsts;
+
+  // Arbitrary, but prevents quadratic behavior.
+  static const unsigned GlobalReassociateLimit = 10;
+  static const unsigned NumBinaryOps =
+      Instruction::BinaryOpsEnd - Instruction::BinaryOpsBegin;
+  DenseMap<std::pair<Value *, Value *>, unsigned> PairMap[NumBinaryOps];
+
+  bool MadeChange;
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+
+private:
+  void BuildRankMap(Function &F, ReversePostOrderTraversal<Function *> &RPOT);
+  unsigned getRank(Value *V);
+  void canonicalizeOperands(Instruction *I);
+  void ReassociateExpression(BinaryOperator *I);
+  void RewriteExprTree(BinaryOperator *I,
+                       SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeExpression(BinaryOperator *I,
+                            SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeAdd(Instruction *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *OptimizeXor(Instruction *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
+                      APInt &ConstOpnd, Value *&Res);
+  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
+                      reassociate::XorOpnd *Opnd2, APInt &ConstOpnd,
+                      Value *&Res);
+  Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder,
+                                 SmallVectorImpl<reassociate::Factor> &Factors);
+  Value *OptimizeMul(BinaryOperator *I,
+                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
+  Value *RemoveFactorFromExpression(Value *V, Value *Factor);
+  void EraseInst(Instruction *I);
+  void RecursivelyEraseDeadInsts(Instruction *I,
+                                 SetVector<AssertingVH<Instruction>> &Insts);
+  void OptimizeInst(Instruction *I);
+  Instruction *canonicalizeNegConstExpr(Instruction *I);
+  void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h b/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h
new file mode 100644
index 0000000..128f176
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/RewriteStatepointsForGC.h
@@ -0,0 +1,39 @@
+//===- RewriteStatepointsForGC.h - ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface to the "Rewrite Statepoints for GC" pass.
+//
+// This pass rewrites call/invoke instructions so as to make potential
+// relocations performed by the garbage collector explicit in the IR.
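+//
+// For example (a sketch), a call that may trigger a collection
+//
+//   %r = call i8 addrspace(1)* @foo(i8 addrspace(1)* %obj)
+//
+// is rewritten into a gc.statepoint call, with explicit gc.relocate uses
+// reloading %obj and the result afterwards so the collector may move them.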
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
+#define LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class DominatorTree;
+class Function;
+class Module;
+class TargetTransformInfo;
+class TargetLibraryInfo;
+
+struct RewriteStatepointsForGC : public PassInfoMixin<RewriteStatepointsForGC> {
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+  bool runOnFunction(Function &F, DominatorTree &, TargetTransformInfo &,
+                     const TargetLibraryInfo &);
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_REWRITE_STATEPOINTS_FOR_GC_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
new file mode 100644
index 0000000..2a294c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SCCP.h
@@ -0,0 +1,43 @@
+//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file implements sparse conditional constant propagation and merging:
+//
+// Specifically, this:
+//   * Assumes values are constant unless proven otherwise
+//   * Assumes BasicBlocks are dead unless proven otherwise
+//   * Proves values to be constant, and replaces them with constants
+//   * Proves conditional branches to be unconditional
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SCCP_H
+#define LLVM_TRANSFORMS_SCALAR_SCCP_H
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+/// This pass performs function-level constant propagation and merging.
+class SCCPPass : public PassInfoMixin<SCCPPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+bool runIPSCCP(Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SCCP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
new file mode 100644
index 0000000..4a321e7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SROA.h
@@ -0,0 +1,139 @@
+//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Scalar Replacement of
+/// Aggregates pass. This pass provides both aggregate splitting and the
+/// primary SSA formation used in the compiler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SROA_H
+#define LLVM_TRANSFORMS_SCALAR_SROA_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class AssumptionCache;
+class DominatorTree;
+class Function;
+class Instruction;
+class LLVMContext;
+class PHINode;
+class SelectInst;
+class Use;
+
+/// A private "module" namespace for types and utilities used by SROA. These
+/// are implementation details and should not be used by clients.
+namespace sroa LLVM_LIBRARY_VISIBILITY {
+
+class AllocaSliceRewriter;
+class AllocaSlices;
+class Partition;
+class SROALegacyPass;
+
+} // end namespace sroa
+
+/// \brief An optimization pass providing Scalar Replacement of Aggregates.
+///
+/// This pass takes allocations which can be completely analyzed (that is, they
+/// don't escape) and tries to turn them into scalar SSA values. There are
+/// a few steps to this process.
+///
+/// 1) It takes allocations of aggregates and analyzes the ways in which they
+///    are used to try to split them into smaller allocations, ideally of
+///    a single scalar data type. It will split up memcpy and memset accesses
+///    as necessary and try to isolate individual scalar accesses.
+/// 2) It will transform accesses into forms which are suitable for SSA value
+///    promotion. This can be replacing a memset with a scalar store of an
+///    integer value, or it can involve speculating operations on a PHI or
+///    select to be a PHI or select of the results.
+/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
+///    onto insert and extract operations on a vector value, and convert them to
+///    this form. By doing so, it will enable promotion of vector aggregates to
+///    SSA vector values.
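+///
+/// For example (a sketch), a non-escaping alloca
+/// \code
+///   %a = alloca i32
+///   store i32 %v, i32* %a
+///   %l = load i32, i32* %a
+/// \endcode
+/// is removed entirely, with uses of %l rewritten to use %v directly.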
+class SROA : public PassInfoMixin<SROA> {
+  LLVMContext *C = nullptr;
+  DominatorTree *DT = nullptr;
+  AssumptionCache *AC = nullptr;
+
+  /// \brief Worklist of alloca instructions to simplify.
+  ///
+  /// Each alloca in the function is added to this. Each new alloca formed gets
+  /// added to it as well to recursively simplify unless that alloca can be
+  /// directly promoted. Finally, each time we rewrite a use of an alloca other
+  /// than the one being actively rewritten, we add it back onto the list if not
+  /// already present to ensure it is re-visited.
+  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> Worklist;
+
+  /// \brief A collection of instructions to delete.
+  /// We try to batch deletions to simplify code and make things a bit more
+  /// efficient.
+  SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts;
+
+  /// \brief Post-promotion worklist.
+  ///
+  /// Sometimes we discover an alloca which has a high probability of becoming
+  /// viable for SROA after a round of promotion takes place. In those cases,
+  /// the alloca is enqueued here for re-processing.
+  ///
+  /// Note that we have to be very careful to clear allocas out of this list in
+  /// the event they are deleted.
+  SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> PostPromotionWorklist;
+
+  /// \brief A collection of alloca instructions we can directly promote.
+  std::vector<AllocaInst *> PromotableAllocas;
+
+  /// \brief A worklist of PHIs to speculate prior to promoting allocas.
+  ///
+  /// All of these PHIs have been checked for the safety of speculation and by
+  /// being speculated will allow promoting allocas currently in the promotable
+  /// queue.
+  SetVector<PHINode *, SmallVector<PHINode *, 2>> SpeculatablePHIs;
+
+  /// \brief A worklist of select instructions to speculate prior to promoting
+  /// allocas.
+  ///
+  /// All of these select instructions have been checked for the safety of
+  /// speculation and by being speculated will allow promoting allocas
+  /// currently in the promotable queue.
+  SetVector<SelectInst *, SmallVector<SelectInst *, 2>> SpeculatableSelects;
+
+public:
+  SROA() = default;
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+  friend class sroa::AllocaSliceRewriter;
+  friend class sroa::SROALegacyPass;
+
+  /// Helper used by both the public run method and by the legacy pass.
+  PreservedAnalyses runImpl(Function &F, DominatorTree &RunDT,
+                            AssumptionCache &RunAC);
+
+  bool presplitLoadsAndStores(AllocaInst &AI, sroa::AllocaSlices &AS);
+  AllocaInst *rewritePartition(AllocaInst &AI, sroa::AllocaSlices &AS,
+                               sroa::Partition &P);
+  bool splitAlloca(AllocaInst &AI, sroa::AllocaSlices &AS);
+  bool runOnAlloca(AllocaInst &AI);
+  void clobberUse(Use &U);
+  bool deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
+  bool promoteAllocas(Function &F);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SROA_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
new file mode 100644
index 0000000..63bfe63
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
@@ -0,0 +1,55 @@
+//===- SimpleLoopUnswitch.h - Hoist loop-invariant control flow -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// This pass transforms loops that contain branches on loop-invariant
+/// conditions to have multiple loops. For example, it turns the left into the
+/// right code:
+///
+///  for (...)                  if (lic)
+///    A                          for (...)
+///    if (lic)                     A; B; C
+///      B                      else
+///    C                          for (...)
+///                                 A; C
+///
+/// This can increase the size of the code exponentially (doubling it every time
+/// a loop is unswitched) so we only unswitch if the resultant code will be
+/// smaller than a threshold.
+///
+/// This pass expects LICM to be run before it to hoist invariant conditions out
+/// of the loop, to make the unswitching opportunity obvious.
+///
+class SimpleLoopUnswitchPass : public PassInfoMixin<SimpleLoopUnswitchPass> {
+  bool NonTrivial;
+
+public:
+  SimpleLoopUnswitchPass(bool NonTrivial = false) : NonTrivial(NonTrivial) {}
+
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+/// Create the legacy pass object for the simple loop unswitcher.
+///
+/// See the documentation for `SimpleLoopUnswitchPass` for details.
+Pass *createSimpleLoopUnswitchLegacyPass(bool NonTrivial = false);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
new file mode 100644
index 0000000..6198957
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -0,0 +1,55 @@
+//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the pass responsible for both
+/// simplifying and canonicalizing the CFG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+
+#include "llvm/Analysis/Utils/Local.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass to simplify and canonicalize the CFG of a function.
+///
+/// This pass iteratively simplifies the entire CFG of a function. It may change
+/// or remove control flow to put the CFG into a canonical form expected by
+/// other passes of the mid-level optimizer. Depending on the specified options,
+/// it may further optimize control-flow to create non-canonical forms.
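+///
+/// A usage sketch enabling the optional, non-canonical transforms (assuming a
+/// configured FunctionAnalysisManager FAM):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(SimplifyCFGPass(SimplifyCFGOptions()
+///                                   .convertSwitchToLookupTable(true)
+///                                   .sinkCommonInsts(true)));
+/// \endcode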
+class SimplifyCFGPass : public PassInfoMixin<SimplifyCFGPass> {
+  SimplifyCFGOptions Options;
+
+public:
+  /// The default constructor sets the pass options to create canonical IR,
+  /// rather than optimal IR. That is, by default we bypass transformations that
+  /// are likely to improve performance but make analysis for other passes more
+  /// difficult.
+  SimplifyCFGPass()
+      : SimplifyCFGPass(SimplifyCFGOptions()
+                            .forwardSwitchCondToPhi(false)
+                            .convertSwitchToLookupTable(false)
+                            .needCanonicalLoops(true)
+                            .sinkCommonInsts(false)) {}
+
+  /// Construct a pass with optional optimizations.
+  SimplifyCFGPass(const SimplifyCFGOptions &PassOptions);
+
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h b/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h
new file mode 100644
index 0000000..f9b3cb0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/Sink.h
@@ -0,0 +1,30 @@
+//===-- Sink.h - Code Sinking -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass moves instructions into successor blocks, when possible, so that
+// they aren't executed on paths where their results aren't needed.
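+//
+// For example (a sketch), a computation used on only one side of a branch
+//
+//   %x = mul i32 %a, %b
+//   br i1 %c, label %then, label %else
+//
+// is sunk into %then when %x is used only there, so the %else path never
+// executes the multiply.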
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SINK_H
+#define LLVM_TRANSFORMS_SCALAR_SINK_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Move instructions into successor blocks when possible.
+class SinkingPass : public PassInfoMixin<SinkingPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_SINK_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h
new file mode 100644
index 0000000..f39e03d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculateAroundPHIs.h
@@ -0,0 +1,111 @@
+//===- SpeculateAroundPHIs.h - Speculate around PHIs ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
+#define LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+/// This pass handles simple speculation of instructions around PHIs when
+/// doing so is profitable for a particular target despite duplicated
+/// instructions.
+///
+/// The motivating example is PHIs of constants, which require
+/// materializing the constants along each edge. If the PHI is used by an
+/// instruction where the target can materialize the constant as part of the
+/// instruction, it is profitable to speculate those instructions around the
+/// PHI node. This can reduce dynamic instruction count as well as decrease
+/// register pressure.
+///
+/// Consider this IR for example:
+///   ```
+///   entry:
+///     br i1 %flag, label %a, label %b
+///
+///   a:
+///     br label %exit
+///
+///   b:
+///     br label %exit
+///
+///   exit:
+///     %p = phi i32 [ 7, %a ], [ 11, %b ]
+///     %sum = add i32 %arg, %p
+///     ret i32 %sum
+///   ```
+/// To materialize the inputs to this PHI node may require an explicit
+/// instruction. For example, on x86 this would turn into something like
+///   ```
+///     testq %eax, %eax
+///     movl $7, %rNN
+///     jne .L
+///     movl $11, %rNN
+///   .L:
+///     addl %edi, %rNN
+///     movl %rNN, %eax
+///     retq
+///   ```
+/// When these constants can be folded directly into another instruction, it
+/// would be preferable to avoid the potential for register pressure (above we
+/// can easily avoid it, but that isn't always true) and simply duplicate the
+/// instruction using the PHI:
+///   ```
+///   entry:
+///     br i1 %flag, label %a, label %b
+///
+///   a:
+///     %sum.1 = add i32 %arg, 7
+///     br label %exit
+///
+///   b:
+///     %sum.2 = add i32 %arg, 11
+///     br label %exit
+///
+///   exit:
+///     %p = phi i32 [ %sum.1, %a ], [ %sum.2, %b ]
+///     ret i32 %p
+///   ```
+/// Which will generate something like the following on x86:
+///   ```
+///     testq %eax, %eax
+///     addl $7, %edi
+///     jne .L
+///     addl $11, %edi
+///   .L:
+///     movl %edi, %eax
+///     retq
+///   ```
+///
+/// It is important to note that this pass is never intended to handle more
+/// complex cases where speculating around PHIs allows simplifications of the
+/// IR itself or other subsequent optimizations. Those can and should already
+/// be handled before this pass is ever run by a more powerful analysis that
+/// can reason about equivalences and common subexpressions. Classically, those
+/// cases would be handled by a GVN-powered PRE or similar transform. This
+/// pass, in contrast, is *only* interested in cases where despite no
+/// simplifications to the IR itself, speculation is *faster* to execute. The
+/// result of this is that the cost models which are appropriate to consider
+/// here are relatively simple ones around execution and codesize cost, without
+/// any need to consider simplifications or other transformations.
+struct SpeculateAroundPHIsPass : PassInfoMixin<SpeculateAroundPHIsPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SPECULATEAROUNDPHIS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h
new file mode 100644
index 0000000..068f817
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/SpeculativeExecution.h
@@ -0,0 +1,92 @@
+//===- SpeculativeExecution.h -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists instructions to enable speculative execution on
+// targets where branches are expensive. This is aimed at GPUs. It
+// currently works on simple if-then and if-then-else
+// patterns.
+//
+// Removing branches is not the only motivation for this
+// pass. E.g. consider this code and assume that there is no
+// addressing mode for multiplying by sizeof(*a):
+//
+//   if (b > 0)
+//     c = a[i + 1]
+//   if (d > 0)
+//     e = a[i + 2]
+//
+// turns into
+//
+//   p = &a[i + 1];
+//   if (b > 0)
+//     c = *p;
+//   q = &a[i + 2];
+//   if (d > 0)
+//     e = *q;
+//
+// which could later be optimized to
+//
+//   r = &a[i];
+//   if (b > 0)
+//     c = r[1];
+//   if (d > 0)
+//     e = r[2];
+//
+// Later passes sink back much of the speculated code that did not enable
+// further optimization.
+//
+// This pass is more aggressive than the function SpeculativelyExecuteBB in
+// SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and
+// it will speculate at most one instruction. It also will not speculate if
+// there is a value defined in the if-block that is only used in the then-block.
+// These restrictions make sense since the speculation in SimplifyCFG seems
+// aimed at introducing cheap selects, while this pass is intended to do more
+// aggressive speculation while counting on later passes to either capitalize on
+// that or clean it up.
+//
+// If the pass was created by calling
+// createSpeculativeExecutionIfHasBranchDivergencePass or the
+// -spec-exec-only-if-divergent-target option is present, this pass only has an
+// effect on targets where TargetTransformInfo::hasBranchDivergence() is true;
+// on other targets, it is a nop.
+//
+// This lets you include this pass unconditionally in the IR pass pipeline, but
+// only enable it for relevant targets.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+#define LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class SpeculativeExecutionPass
+    : public PassInfoMixin<SpeculativeExecutionPass> {
+public:
+  SpeculativeExecutionPass(bool OnlyIfDivergentTarget = false);
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM
+  bool runImpl(Function &F, TargetTransformInfo *TTI);
+
+private:
+  bool runOnBasicBlock(BasicBlock &B);
+  bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
+
+  // If true, this pass is a nop unless the target architecture has branch
+  // divergence.  
+  const bool OnlyIfDivergentTarget = false;
+
+  TargetTransformInfo *TTI = nullptr;
+};
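+
+// A minimal sketch (hypothetical new-PM pipeline code): run the pass only on
+// targets with branch divergence, mirroring
+// -spec-exec-only-if-divergent-target.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(SpeculativeExecutionPass(/*OnlyIfDivergentTarget=*/true));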
+}
+
+#endif //LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h b/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h
new file mode 100644
index 0000000..793f9bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Scalar/TailRecursionElimination.h
@@ -0,0 +1,66 @@
+//===---- TailRecursionElimination.h ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file transforms calls of the current function (self recursion) followed
+// by a return instruction with a branch to the entry of the function, creating
+// a loop.  This pass also implements the following extensions to the basic
+// algorithm:
+//
+//  1. Trivial instructions between the call and return do not prevent the
+//     transformation from taking place, though currently the analysis cannot
+//     support moving any really useful instructions (only dead ones).
+//  2. This pass transforms functions that are prevented from being tail
+//     recursive by an associative and commutative expression to use an
+//     accumulator variable, thus compiling the typical naive factorial or
+//     'fib' implementation into efficient code (see the example below).
+//  3. TRE is performed if the function returns void, if the return
+//     returns the result returned by the call, or if the function returns a
+//     run-time constant on all exits from the function.  It is possible, though
+//     unlikely, that the return returns something else (like constant 0), and
+//     can still be TRE'd.  It can be TRE'd if ALL OTHER return instructions in
+//     the function return the exact same value.
+//  4. If it can prove that callees do not access their caller stack frame,
+//     they are marked as eligible for tail call elimination (by the code
+//     generator).
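+//
+//     For example (extension 2), the accumulator transformation rewrites, in
+//     effect:
+//
+//       int fac(int n) { return n <= 1 ? 1 : n * fac(n - 1); }
+//
+//     into the loop form:
+//
+//       int fac(int n) { int acc = 1; while (n > 1) acc *= n--; return acc; }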
+//
+// There are several improvements that could be made:
+//
+//  1. If the function has any alloca instructions, these instructions will be
+//     moved out of the entry block of the function, causing them to be
+//     evaluated each time through the tail recursion.  Safely keeping allocas
+//     in the entry block requires analysis to prove that the tail-called
+//     function does not read or write the stack object.
+//  2. Tail recursion is only performed if the call immediately precedes the
+//     return instruction.  It's possible that there could be a jump between
+//     the call and the return.
+//  3. There can be intervening operations between the call and the return that
+//     prevent the TRE from occurring.  For example, there could be GEP's and
+//     stores to memory that will not be read or written by the call.  This
+//     requires some substantial analysis (such as with DSA) to prove safe to
+//     move ahead of the call, but doing so could allow many more TREs to be
+//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
+//  4. The algorithm we use to detect if callees access their caller stack
+//     frames is very primitive.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
+#define LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+struct TailCallElimPass : PassInfoMixin<TailCallElimPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils.h b/linux-x64/clang/include/llvm/Transforms/Utils.h
new file mode 100644
index 0000000..cfb89d1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils.h
@@ -0,0 +1,125 @@
+//===- llvm/Transforms/Utils.h - Utility Transformations --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Utils transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_H
+#define LLVM_TRANSFORMS_UTILS_H
+
+namespace llvm {
+
+class ModulePass;
+class FunctionPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerInvoke - This pass removes invoke instructions, converting them to call
+// instructions.
+//
+FunctionPass *createLowerInvokePass();
+extern char &LowerInvokePassID;
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
+//
+FunctionPass *createInstructionNamerPass();
+extern char &InstructionNamerID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerSwitch - This pass converts SwitchInst instructions into a sequence of
+// chained binary branch instructions.
+//
+FunctionPass *createLowerSwitchPass();
+extern char &LowerSwitchID;
+
+//===----------------------------------------------------------------------===//
+//
+// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
+// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
+// variants, intended to run pre- and post-inlining, respectively.
+//
+FunctionPass *createEntryExitInstrumenterPass();
+FunctionPass *createPostInlineEntryExitInstrumenterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting
+// a dummy basic block. This pass may be "required" by passes that cannot deal
+// with critical edges. For this usage, a pass must call:
+//
+//   AU.addRequiredID(BreakCriticalEdgesID);
+//
+// This pass obviously invalidates the CFG, but can update forward dominator
+// (set, immediate dominators, tree, and frontier) information.
+//
+FunctionPass *createBreakCriticalEdgesPass();
+extern char &BreakCriticalEdgesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop
+// optimizations.
+//
+Pass *createLCSSAPass();
+extern char &LCSSAID;
+
+//===----------------------------------------------------------------------===//
+//
+// AddDiscriminators - Add DWARF path discriminators to the IR.
+FunctionPass *createAddDiscriminatorsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PromoteMemoryToRegister - This pass is used to promote memory references to
+// be register references. A simple example of the transformation performed by
+// this pass is:
+//
+//        FROM CODE                           TO CODE
+//   %X = alloca i32, i32 1                 ret i32 42
+//   store i32 42, i32 *%X
+//   %Y = load i32* %X
+//   ret i32 %Y
+//
+FunctionPass *createPromoteMemoryToRegisterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplify - Insert Pre-header blocks into the CFG for every function in
+// the module.  This pass updates dominator information, loop information, and
+// does not add critical edges to the CFG.
+//
+//   AU.addRequiredID(LoopSimplifyID);
+//
+Pass *createLoopSimplifyPass();
+extern char &LoopSimplifyID;
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionSimplifier - Remove redundant instructions.
+//
+FunctionPass *createInstructionSimplifierPass();
+extern char &InstructionSimplifierID;
+
+/// This function returns a new pass that downgrades the debug info in the
+/// module to line tables only.
+ModulePass *createStripNonLineTableDebugInfoPass();
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
new file mode 100644
index 0000000..eaad06a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -0,0 +1,81 @@
+//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class AllocaInst;
+
+// These magic constants should be the same as
+// in asan_internal.h from the ASan runtime in compiler-rt.
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+static const int kAsanStackUseAfterReturnMagic = 0xf5;
+static const int kAsanStackUseAfterScopeMagic = 0xf8;
+
+// Input/output data struct for ComputeASanStackFrameLayout.
+struct ASanStackVariableDescription {
+  const char *Name;    // Name of the variable that will be displayed by asan
+                       // if a stack-related bug is reported.
+  uint64_t Size;       // Size of the variable in bytes.
+  size_t LifetimeSize; // Size in bytes to use for lifetime analysis check.
+                       // Will be rounded up to Granularity.
+  size_t Alignment;    // Alignment of the variable (power of 2).
+  AllocaInst *AI;      // The actual AllocaInst.
+  size_t Offset;       // Offset from the beginning of the frame;
+                       // set by ComputeASanStackFrameLayout.
+  unsigned Line;       // Line number.
+};
+
+// Output data struct for ComputeASanStackFrameLayout.
+struct ASanStackFrameLayout {
+  size_t Granularity;     // Shadow granularity.
+  size_t FrameAlignment;  // Alignment for the entire frame.
+  size_t FrameSize;       // Size of the frame in bytes.
+};
+
+ASanStackFrameLayout ComputeASanStackFrameLayout(
+    // The array of stack variables. The elements may get reordered and changed.
+    SmallVectorImpl<ASanStackVariableDescription> &Vars,
+    // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
+    size_t Granularity,
+    // The minimal size of the left-most redzone (header).
+    // At least 4 pointer sizes, power of 2, and >= Granularity.
+    // The resulting FrameSize should be a multiple of MinHeaderSize.
+    size_t MinHeaderSize);
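+
+// A minimal sketch (hypothetical Vars array; the granularity and header size
+// are illustrative values) of driving the layout computation together with
+// GetShadowBytes, declared below:
+//
+//   ASanStackFrameLayout L =
+//       ComputeASanStackFrameLayout(Vars, /*Granularity=*/8,
+//                                   /*MinHeaderSize=*/32);
+//   SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, L);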
+
+// Compute frame description, see DescribeAddressIfStack in ASan runtime.
+SmallString<64> ComputeASanStackFrameDescription(
+    const SmallVectorImpl<ASanStackVariableDescription> &Vars);
+
+// Returns shadow bytes with marked red zones. This shadow represents the state
+// of the stack frame when all local variables are inside of their own scope.
+SmallVector<uint8_t, 64>
+GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+               const ASanStackFrameLayout &Layout);
+
+// Returns shadow bytes with marked red zones and after-scope markings. This
+// shadow represents the state of the stack frame when all local variables are
+// outside of their own scope.
+SmallVector<uint8_t, 64> GetShadowBytesAfterScope(
+    // The array of stack variables. The elements may get reordered and changed.
+    const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+    const ASanStackFrameLayout &Layout);
+
+} // llvm namespace
+
+#endif  // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h b/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h
new file mode 100644
index 0000000..4dad06e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/AddDiscriminators.h
@@ -0,0 +1,32 @@
+//===- AddDiscriminators.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass adds DWARF discriminators to the IR. Path discriminators are used
+// to decide what CFG path was taken inside sub-graphs whose instructions share
+// the same line and column number information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+#define LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class AddDiscriminatorsPass : public PassInfoMixin<AddDiscriminatorsPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
new file mode 100644
index 0000000..6f0d2de
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -0,0 +1,314 @@
+//===- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on basic blocks, and
+// instructions contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/InstrTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class DeferredDominance;
+class DominatorTree;
+class Function;
+class Instruction;
+class LoopInfo;
+class MDNode;
+class MemoryDependenceResults;
+class ReturnInst;
+class TargetLibraryInfo;
+class Value;
+
+/// Delete the specified block, which must have no predecessors.
+void DeleteDeadBlock(BasicBlock *BB, DeferredDominance *DDT = nullptr);
+
+/// We know that BB has one predecessor. If there are any single-entry PHI nodes
+/// in it, fold them away. This handles the case when all entries to the PHI
+/// nodes in a block are guaranteed equal, such as when the block has exactly
+/// one predecessor.
+void FoldSingleEntryPHINodes(BasicBlock *BB,
+                             MemoryDependenceResults *MemDep = nullptr);
+
+/// Examine each PHI in the given block and delete it if it is dead. Also
+/// recursively delete any operands that become dead as a result. This includes
+/// tracing the def-use list from the PHI to see if it is ultimately unused or
+/// if it reaches an unused cycle. Return true if any PHIs were deleted.
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
+
+/// Attempts to merge a block into its predecessor, if possible. The return
+/// value indicates success or failure.
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+                               LoopInfo *LI = nullptr,
+                               MemoryDependenceResults *MemDep = nullptr);
+
+/// Replace all uses of an instruction (specified by BI) with a value, then
+/// remove and delete the original instruction.
+void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
+                          BasicBlock::iterator &BI, Value *V);
+
+/// Replace the instruction specified by BI with the instruction specified by I.
+/// Copies DebugLoc from BI to I, if I doesn't already have a DebugLoc. The
+/// original instruction is deleted and BI is updated to point to the new
+/// instruction.
+void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
+                         BasicBlock::iterator &BI, Instruction *I);
+
+/// Replace the instruction specified by From with the instruction specified by
+/// To. Copies DebugLoc from From to To, if To doesn't already have a DebugLoc.
+void ReplaceInstWithInst(Instruction *From, Instruction *To);
+
+/// Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+  DominatorTree *DT;
+  LoopInfo *LI;
+  bool MergeIdenticalEdges = false;
+  bool DontDeleteUselessPHIs = false;
+  bool PreserveLCSSA = false;
+
+  CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
+                               LoopInfo *LI = nullptr)
+      : DT(DT), LI(LI) {}
+
+  CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+    MergeIdenticalEdges = true;
+    return *this;
+  }
+
+  CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+    DontDeleteUselessPHIs = true;
+    return *this;
+  }
+
+  CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+    PreserveLCSSA = true;
+    return *this;
+  }
+};
+
+/// If this edge is a critical edge, insert a new node to split the critical
+/// edge. This will update the analyses passed in through the option struct.
+/// This returns the new block if the edge was split, null otherwise.
+///
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination.  This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst.  Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
+                              const CriticalEdgeSplittingOptions &Options =
+                                  CriticalEdgeSplittingOptions());
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+                  const CriticalEdgeSplittingOptions &Options =
+                      CriticalEdgeSplittingOptions()) {
+  return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+                           Options);
+}
+
+/// If the edge from *PI to BB is not critical, return false. Otherwise, split
+/// all edges between the two blocks and return true. This updates all of the
+/// same analyses as the other SplitCriticalEdge function.
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+                              const CriticalEdgeSplittingOptions &Options =
+                                  CriticalEdgeSplittingOptions()) {
+  bool MadeChange = false;
+  TerminatorInst *TI = (*PI)->getTerminator();
+  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+    if (TI->getSuccessor(i) == Succ)
+      MadeChange |= !!SplitCriticalEdge(TI, i, Options);
+  return MadeChange;
+}
+
+/// If an edge from Src to Dst is critical, split the edge and return the new
+/// block, otherwise return null. This method requires that there be an edge
+/// between the two blocks. It updates the analyses passed in the options
+/// struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+                  const CriticalEdgeSplittingOptions &Options =
+                      CriticalEdgeSplittingOptions()) {
+  TerminatorInst *TI = Src->getTerminator();
+  unsigned i = 0;
+  while (true) {
+    assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
+    if (TI->getSuccessor(i) == Dst)
+      return SplitCriticalEdge(TI, i, Options);
+    ++i;
+  }
+}
+
+/// Loop over all of the edges in the CFG, breaking critical edges as they are
+/// found. Returns the number of broken edges.
+unsigned SplitAllCriticalEdges(Function &F,
+                               const CriticalEdgeSplittingOptions &Options =
+                                   CriticalEdgeSplittingOptions());
+
+/// Split the edge connecting specified block.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+                      DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// Split the specified block at the specified instruction - everything before
+/// SplitPt stays in Old and everything starting with SplitPt moves to a new
+/// block. The two blocks are joined by an unconditional branch and the loop
+/// info is updated.
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+                       DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// This method introduces at least one new basic block into the function and
+/// moves some of the predecessors of BB to be predecessors of the new block.
+/// The new predecessors are indicated by the Preds array. The new block is
+/// given a suffix of 'Suffix'. Returns the new basic block to which the
+/// predecessors from Preds now point.
+///
+/// If BB is a landingpad block then an additional basic block might be
+/// introduced. It will have Suffix+".split_lp". See SplitLandingPadPredecessors
+/// for more details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+                                   const char *Suffix,
+                                   DominatorTree *DT = nullptr,
+                                   LoopInfo *LI = nullptr,
+                                   bool PreserveLCSSA = false);
+
+/// This method transforms the landing pad, OrigBB, by introducing two new basic
+/// blocks into the function. One of those new basic blocks gets the
+/// predecessors listed in Preds. The other basic block gets the remaining
+/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
+/// both of the new basic blocks. The new blocks are given the suffixes
+/// 'Suffix1' and 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+                                 ArrayRef<BasicBlock *> Preds,
+                                 const char *Suffix, const char *Suffix2,
+                                 SmallVectorImpl<BasicBlock *> &NewBBs,
+                                 DominatorTree *DT = nullptr,
+                                 LoopInfo *LI = nullptr,
+                                 bool PreserveLCSSA = false);
+
+/// This method duplicates the specified return instruction into a predecessor
+/// which ends in an unconditional branch. If the return instruction returns a
+/// value defined by a PHI, propagate the right value into the return. It
+/// returns the new return instruction in the predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+                                       BasicBlock *Pred);
+
+/// Split the containing block at the specified instruction - everything before
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
+/// conditional branch (with value of Cmp being the condition).
+/// Before:
+///   Head
+///   SplitBefore
+///   Tail
+/// After:
+///   Head
+///   if (Cond)
+///     ThenBlock
+///   SplitBefore
+///   Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT and LI if given.
+TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+                                          bool Unreachable,
+                                          MDNode *BranchWeights = nullptr,
+                                          DominatorTree *DT = nullptr,
+                                          LoopInfo *LI = nullptr);
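+
+// A minimal usage sketch (Cond, SplitPt, V and Ptr are hypothetical values
+// already in hand): guard a single store behind a condition.
+//
+//   TerminatorInst *ThenTerm =
+//       SplitBlockAndInsertIfThen(Cond, SplitPt, /*Unreachable=*/false);
+//   IRBuilder<> B(ThenTerm);
+//   B.CreateStore(V, Ptr); // executes only when Cond is true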
+
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+///   Head
+///   SplitBefore
+///   Tail
+/// After:
+///   Head
+///   if (Cond)
+///     ThenBlock
+///   else
+///     ElseBlock
+///   SplitBefore
+///   Tail
+void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+                                   TerminatorInst **ThenTerm,
+                                   TerminatorInst **ElseTerm,
+                                   MDNode *BranchWeights = nullptr);
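+
+// A sketch using the same hypothetical values as above (plus values A and B):
+// populate both sides of the new diamond.
+//
+//   TerminatorInst *ThenTerm, *ElseTerm;
+//   SplitBlockAndInsertIfThenElse(Cond, SplitPt, &ThenTerm, &ElseTerm);
+//   IRBuilder<>(ThenTerm).CreateStore(A, Ptr);
+//   IRBuilder<>(ElseTerm).CreateStore(B, Ptr);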
+
+/// Check whether BB is the merge point of an if-region.
+/// If so, return the boolean condition that determines which entry into
+/// BB will be taken.  Also, return by reference the block that will be
+/// entered from if the condition is true, and the block that will be
+/// entered if the condition is false.
+///
+/// This does no checking to see if the true/false blocks have large or unsavory
+/// instructions in them.
+Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+                      BasicBlock *&IfFalse);
+
+// Split critical edges where the source of the edge is an indirectbr
+// instruction. This isn't always possible, but we can handle some easy cases.
+// This is useful because the machine-IR (MI) level is unable to split such
+// critical edges, which means it will not be able to sink instructions along
+// those edges.
+// This is especially painful for indirect branches with many successors, where
+// we end up having to prepare all outgoing values in the origin block.
+//
+// Our normal algorithm for splitting critical edges requires us to update
+// the outgoing edges of the edge origin block, but for an indirectbr this
+// is hard, since it would require finding and updating the block addresses
+// the indirect branch uses. But if a block only has a single indirectbr
+// predecessor, with the others being regular branches, we can do it in a
+// different way.
+// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
+// We can split D into D0 and D1, where D0 contains only the PHIs from D,
+// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
+// create the following structure:
+// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
+// If BPI and BFI are non-null, they will be updated accordingly.
+bool SplitIndirectBrCriticalEdges(Function &F,
+                                  BranchProbabilityInfo *BPI = nullptr,
+                                  BlockFrequencyInfo *BFI = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h b/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h
new file mode 100644
index 0000000..9cc81a1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BreakCriticalEdges.h
@@ -0,0 +1,29 @@
+//===- BreakCriticalEdges.h - Critical Edge Elimination Pass --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
+// inserting a dummy basic block.  This pass may be "required" by passes that
+// cannot deal with critical edges.  For this usage, the structure type is
+// forward declared.  This pass obviously invalidates the CFG, but can update
+// dominator trees.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+#define LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct BreakCriticalEdgesPass : public PassInfoMixin<BreakCriticalEdgesPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
new file mode 100644
index 0000000..3a71559
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -0,0 +1,125 @@
+//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+  class Value;
+  class DataLayout;
+  class TargetLibraryInfo;
+
+  /// Analyze the name and prototype of the given function and set any
+  /// applicable attributes.
+  /// If the library function is unavailable, F is left unmodified.
+  ///
+  /// Returns true if any attributes were set and false otherwise.
+  bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
+
+  /// Check whether the overloaded unary floating point function
+  /// corresponding to \a Ty is available.
+  bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+                       LibFunc DoubleFn, LibFunc FloatFn,
+                       LibFunc LongDoubleFn);
+
+  /// Return V if it is an i8*, otherwise cast it to i8*.
+  Value *castToCStr(Value *V, IRBuilder<> &B);
+
+  /// Emit a call to the strlen function to the builder, for the specified
+  /// pointer. Ptr is required to be some pointer type, and the return value has
+  /// 'intptr_t' type.
+  Value *emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
+                    const TargetLibraryInfo *TLI);
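+
+  // A minimal sketch (hypothetical Ptr, InsertPt and TLI values) of emitting
+  // a strlen call from a simplification pass:
+  //
+  //   IRBuilder<> B(InsertPt);
+  //   const DataLayout &DL = InsertPt->getModule()->getDataLayout();
+  //   Value *Len = emitStrLen(Ptr, B, DL, TLI); // result has intptr_t width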
+
+  /// Emit a call to the strnlen function to the builder, for the specified
+  /// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
+  /// type, and the return value has 'intptr_t' type.
+  Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+                     const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strchr function to the builder, for the specified
+  /// pointer and character. Ptr is required to be some pointer type, and the
+  /// return value has 'i8*' type.
+  Value *emitStrChr(Value *Ptr, char C, IRBuilder<> &B,
+                    const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strncmp function to the builder.
+  Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+                     const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the strcpy function to the builder, for the specified
+  /// pointer arguments.
+  Value *emitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+                    const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
+
+  /// Emit a call to the strncpy function to the builder, for the specified
+  /// pointer arguments and length.
+  Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+                     const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
+
+  /// Emit a call to the __memcpy_chk function to the builder. This expects
+  /// that Len and ObjSize have type 'intptr_t' and Dst/Src are pointers.
+  Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+                       IRBuilder<> &B, const DataLayout &DL,
+                       const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the memchr function. This assumes that Ptr is a pointer,
+  /// Val is an i32 value, and Len is an 'intptr_t' value.
+  Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the memcmp function.
+  Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
+  /// function is known to take a single argument of type matching 'Op' and
+  /// returns one value with the same type. If 'Op' is a long double, 'l' is
+  /// added as the suffix of the name; if 'Op' is a float, an 'f' suffix is
+  /// added.
+  Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+                              const AttributeList &Attrs);
+
+  /// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
+  /// function is known to take types matching 'Op1' and 'Op2' and return one
+  /// value with the same type. If 'Op1'/'Op2' are long double, 'l' is added as
+  /// the suffix of the name; if they are float, an 'f' suffix is added.
+  Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+                               IRBuilder<> &B, const AttributeList &Attrs);
+
+  /// Emit a call to the putchar function. This assumes that Char is an integer.
+  Value *emitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the puts function. This assumes that Str is some pointer.
+  Value *emitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fputc function. This assumes that Char is an i32, and
+  /// File is a pointer to FILE.
+  Value *emitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+                   const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fputs function. Str is required to be a pointer and
+  /// File is a pointer to FILE.
+  Value *emitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+                   const TargetLibraryInfo *TLI);
+
+  /// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
+  /// Size is an 'intptr_t', and File is a pointer to FILE.
+  Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+                    const DataLayout &DL, const TargetLibraryInfo *TLI);
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 0000000..6eca5ed
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,70 @@
+//===- llvm/Transforms/Utils/BypassSlowDivision.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BasicBlock;
+class Value;
+
+struct DivRemMapKey {
+  bool SignedOp;
+  Value *Dividend;
+  Value *Divisor;
+
+  DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
+      : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
+};
+
+template <> struct DenseMapInfo<DivRemMapKey> {
+  static bool isEqual(const DivRemMapKey &Val1, const DivRemMapKey &Val2) {
+    return Val1.SignedOp == Val2.SignedOp && Val1.Dividend == Val2.Dividend &&
+           Val1.Divisor == Val2.Divisor;
+  }
+
+  static DivRemMapKey getEmptyKey() {
+    return DivRemMapKey(false, nullptr, nullptr);
+  }
+
+  static DivRemMapKey getTombstoneKey() {
+    return DivRemMapKey(true, nullptr, nullptr);
+  }
+
+  static unsigned getHashValue(const DivRemMapKey &Val) {
+    return (unsigned)(reinterpret_cast<uintptr_t>(Val.Dividend) ^
+                      reinterpret_cast<uintptr_t>(Val.Divisor)) ^
+           (unsigned)Val.SignedOp;
+  }
+};
+
+/// This optimization identifies DIV instructions in a BB that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+///
+/// This optimization may add basic blocks immediately after BB; for obvious
+/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
+bool bypassSlowDivision(
+    BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
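+
+// A sketch of the intended use (hypothetical pass code, mirroring the Atom
+// example above): for i32 div/rem, try an i8 fast path when both operands
+// fit. The iteration saves the next block first so the blocks this call
+// inserts are never passed back in.
+//
+//   DenseMap<unsigned, unsigned> BypassWidths;
+//   BypassWidths[32] = 8;
+//   for (BasicBlock *BB = &F.front(); BB;) {
+//     BasicBlock *Next = BB->getNextNode();
+//     Changed |= bypassSlowDivision(BB, BypassWidths);
+//     BB = Next;
+//   }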
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
new file mode 100644
index 0000000..6e8ece7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CallPromotionUtils.h
@@ -0,0 +1,54 @@
+//===- CallPromotionUtils.h - Utilities for call promotion ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares utilities useful for promoting indirect call sites to
+// direct call sites.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+
+#include "llvm/IR/CallSite.h"
+
+namespace llvm {
+
+/// Return true if the given indirect call site can be made to call \p Callee.
+///
+/// This function ensures that the number and type of the call site's arguments
+/// and return value match those of the given function. If the types do not
+/// match exactly, they must at least be bitcast compatible. If \p FailureReason
+/// is non-null and the indirect call cannot be promoted, the failure reason
+/// will be stored in it.
+bool isLegalToPromote(CallSite CS, Function *Callee,
+                      const char **FailureReason = nullptr);
+
+/// Promote the given indirect call site to unconditionally call \p Callee.
+///
+/// This function promotes the given call site, returning the direct call or
+/// invoke instruction. If the function type of the call site doesn't match that
+/// of the callee, bitcast instructions are inserted where appropriate. If \p
+/// RetBitCast is non-null, it will be used to store the return value bitcast,
+/// if created.
+Instruction *promoteCall(CallSite CS, Function *Callee,
+                         CastInst **RetBitCast = nullptr);
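+
+// A minimal sketch (hypothetical CS and Callee values) combining the two
+// entry points above:
+//
+//   const char *Reason = nullptr;
+//   if (isLegalToPromote(CS, Callee, &Reason))
+//     promoteCall(CS, Callee);
+//   else
+//     errs() << "not promoting: " << Reason << "\n";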
+
+/// Promote the given indirect call site to conditionally call \p Callee.
+///
+/// This function creates an if-then-else structure at the location of the call
+/// site. The original call site is moved into the "else" block. A clone of the
+/// indirect call site is promoted, placed in the "then" block, and returned. If
+/// \p BranchWeights is non-null, it will be used to set !prof metadata on the
+/// new conditional branch.
+Instruction *promoteCallWithIfThenElse(CallSite CS, Function *Callee,
+                                       MDNode *BranchWeights = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
new file mode 100644
index 0000000..cd02ca6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Cloning.h
@@ -0,0 +1,272 @@
+//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various functions that are used to clone chunks of LLVM
+// code for various purposes.  This varies from copying whole modules into new
+// modules, to cloning functions with different arguments, to inlining
+// functions, to copying basic blocks to support loop unrolling or superblock
+// formation, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
+#define LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <functional>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class BlockFrequencyInfo;
+class CallInst;
+class CallGraph;
+class DebugInfoFinder;
+class DominatorTree;
+class Function;
+class Instruction;
+class InvokeInst;
+class Loop;
+class LoopInfo;
+class Module;
+class ProfileSummaryInfo;
+class ReturnInst;
+
+/// Return an exact copy of the specified module.
+///
+std::unique_ptr<Module> CloneModule(const Module &M);
+std::unique_ptr<Module> CloneModule(const Module &M, ValueToValueMapTy &VMap);
+
+/// Return a copy of the specified module. The ShouldCloneDefinition function
+/// controls whether a specific GlobalValue's definition is cloned. If the
+/// function returns false, the module copy will contain an external reference
+/// in place of the global definition.
+std::unique_ptr<Module>
+CloneModule(const Module &M, ValueToValueMapTy &VMap,
+            function_ref<bool(const GlobalValue *)> ShouldCloneDefinition);
+
+/// ClonedCodeInfo - This struct can be used to capture information about code
+/// being cloned, while it is being cloned.
+struct ClonedCodeInfo {
+  /// ContainsCalls - This is set to true if the cloned code contains a normal
+  /// call instruction.
+  bool ContainsCalls = false;
+
+  /// ContainsDynamicAllocas - This is set to true if the cloned code contains
+  /// a 'dynamic' alloca.  Dynamic allocas are allocas that are either not in
+  /// the entry block or they are in the entry block but are not a constant
+  /// size.
+  bool ContainsDynamicAllocas = false;
+
+  /// All cloned call sites that have operand bundles attached are appended to
+  /// this vector.  This vector may contain nulls or undefs if some of the
+  /// originally inserted callsites were DCE'ed after they were cloned.
+  std::vector<WeakTrackingVH> OperandBundleCallSites;
+
+  ClonedCodeInfo() = default;
+};
+
+/// CloneBasicBlock - Return a copy of the specified basic block, but without
+/// embedding the block into a particular function.  The block returned is an
+/// exact copy of the specified basic block, without any remapping having been
+/// performed.  Because of this, this is only suitable for applications where
+/// the basic block will be inserted into the same function that it was cloned
+/// from (loop unrolling would use this, for example).
+///
+/// Also, note that this function makes a direct copy of the basic block, and
+/// can thus produce illegal LLVM code.  In particular, it will copy any PHI
+/// nodes from the original block, even though there are no predecessors for the
+/// newly cloned block (thus, phi nodes will have to be updated).  Also, this
+/// block will branch to the old successors of the original block: these
+/// successors will have to have any PHI nodes updated to account for the new
+/// incoming edges.
+///
+/// The correlation between instructions in the source and result basic blocks
+/// is recorded in the VMap map.
+///
+/// If you have a particular suffix you'd like to use to add to any cloned
+/// names, specify it as the optional third parameter.
+///
+/// If you would like the basic block to be auto-inserted into the end of a
+/// function, you can specify it as the optional fourth parameter.
+///
+/// If you would like to collect additional information about the cloned
+/// function, you can specify a ClonedCodeInfo object with the optional fifth
+/// parameter.
+///
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
+                            const Twine &NameSuffix = "", Function *F = nullptr,
+                            ClonedCodeInfo *CodeInfo = nullptr,
+                            DebugInfoFinder *DIFinder = nullptr);
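+
+// A minimal sketch (hypothetical block BB inside function F) of cloning a
+// block and remapping the copy afterwards with remapInstructionsInBlocks,
+// declared below:
+//
+//   ValueToValueMapTy VMap;
+//   BasicBlock *Copy = CloneBasicBlock(BB, VMap, ".clone", &F);
+//   SmallVector<BasicBlock *, 1> Blocks = {Copy};
+//   remapInstructionsInBlocks(Blocks, VMap);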
+
+/// CloneFunction - Return a copy of the specified function and add it to that
+/// function's module.  Also, any references specified in the VMap are changed
+/// to refer to their mapped value instead of the original one.  If any of the
+/// arguments to the function are in the VMap, the arguments are deleted from
+/// the resultant function.  The VMap is updated to include mappings from all of
+/// the instructions and basicblocks in the function from their old to new
+/// values.  The final argument captures information about the cloned code if
+/// non-null.
+///
+/// VMap contains no non-identity GlobalValue mappings and debug info metadata
+/// will not be cloned.
+///
+Function *CloneFunction(Function *F, ValueToValueMapTy &VMap,
+                        ClonedCodeInfo *CodeInfo = nullptr);
+
+/// Clone OldFunc into NewFunc, transforming the old arguments into references
+/// to VMap values.  Note that if NewFunc already has basic blocks, the ones
+/// cloned into it will be added to the end of the function.  This function
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
+                       ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                       SmallVectorImpl<ReturnInst*> &Returns,
+                       const char *NameSuffix = "",
+                       ClonedCodeInfo *CodeInfo = nullptr,
+                       ValueMapTypeRemapper *TypeMapper = nullptr,
+                       ValueMaterializer *Materializer = nullptr);
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+                               const Instruction *StartingInst,
+                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                               SmallVectorImpl<ReturnInst *> &Returns,
+                               const char *NameSuffix = "",
+                               ClonedCodeInfo *CodeInfo = nullptr);
+
+/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
+/// except that it does some simple constant prop and DCE on the fly.  The
+/// effect of this is to copy significantly less code in cases where (for
+/// example) a function call with constant arguments is inlined, and those
+/// constant arguments cause a significant amount of code in the callee to be
+/// dead.  Since this doesn't produce an exact copy of the input, it can't be
+/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
+                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+                               SmallVectorImpl<ReturnInst*> &Returns,
+                               const char *NameSuffix = "",
+                               ClonedCodeInfo *CodeInfo = nullptr,
+                               Instruction *TheCall = nullptr);
+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+  explicit InlineFunctionInfo(CallGraph *cg = nullptr,
+                              std::function<AssumptionCache &(Function &)>
+                                  *GetAssumptionCache = nullptr,
+                              ProfileSummaryInfo *PSI = nullptr,
+                              BlockFrequencyInfo *CallerBFI = nullptr,
+                              BlockFrequencyInfo *CalleeBFI = nullptr)
+      : CG(cg), GetAssumptionCache(GetAssumptionCache), PSI(PSI),
+        CallerBFI(CallerBFI), CalleeBFI(CalleeBFI) {}
+
+  /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+  /// changes it makes.
+  CallGraph *CG;
+  std::function<AssumptionCache &(Function &)> *GetAssumptionCache;
+  ProfileSummaryInfo *PSI;
+  BlockFrequencyInfo *CallerBFI, *CalleeBFI;
+
+  /// StaticAllocas - InlineFunction fills this in with all static allocas that
+  /// get copied into the caller.
+  SmallVector<AllocaInst *, 4> StaticAllocas;
+
+  /// InlinedCalls - InlineFunction fills this in with callsites that were
+  /// inlined from the callee.  This is only filled in if CG is non-null.
+  SmallVector<WeakTrackingVH, 8> InlinedCalls;
+
+  /// All of the new call sites inlined into the caller.
+  ///
+  /// 'InlineFunction' fills this in by scanning the inlined instructions, and
+  /// only if CG is null. If CG is non-null, instead the value handle
+  /// `InlinedCalls` above is used.
+  SmallVector<CallSite, 8> InlinedCallSites;
+
+  void reset() {
+    StaticAllocas.clear();
+    InlinedCalls.clear();
+    InlinedCallSites.clear();
+  }
+};
+
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller.  This returns false if it is not possible to inline
+/// this call.  The program is still in a well defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining.  For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream.  Similarly this will inline a recursive
+/// function by one level.
+///
+/// Note that while this routine is allowed to cleanup and optimize the
+/// *inlined* code to minimize the actual inserted code, it must not delete
+/// code in the caller as users of this routine may have pointers to
+/// instructions in the caller that need to remain stable.
+///
+/// If ForwardVarArgsTo is passed, inlining a function with varargs is allowed
+/// and all varargs at the callsite will be passed to any calls to
+/// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
+/// are only used by ForwardVarArgsTo.
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+                    AAResults *CalleeAAR = nullptr, bool InsertLifetime = true,
+                    Function *ForwardVarArgsTo = nullptr);
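+
+// A minimal sketch (hypothetical call site CS): inline one level, with no
+// callgraph or profile updates.
+//
+//   InlineFunctionInfo IFI;
+//   if (InlineFunction(CS, IFI)) {
+//     // The callee's body now lives in the caller; IFI.StaticAllocas lists
+//     // the allocas copied into the caller's entry block.
+//   }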
+
+/// \brief Clones a loop \p OrigLoop.  Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB.  Insert the new blocks before the block specified in \p Before.
+/// Note: Only innermost loops are supported.
+Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+                             Loop *OrigLoop, ValueToValueMapTy &VMap,
+                             const Twine &NameSuffix, LoopInfo *LI,
+                             DominatorTree *DT,
+                             SmallVectorImpl<BasicBlock *> &Blocks);
+
+/// \brief Remaps instructions in \p Blocks using the mapping in \p VMap.
+void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
+                               ValueToValueMapTy &VMap);
+
+/// Split edge between BB and PredBB and duplicate all non-Phi instructions
+/// from BB between its beginning and the StopAt instruction into the split
+/// block. Phi nodes are not duplicated, but their uses are handled correctly:
+/// we replace them with the uses of corresponding Phi inputs. ValueMapping
+/// is used to map the original instructions from BB to their newly-created
+/// copies. Returns the split block.
+BasicBlock *
+DuplicateInstructionsInSplitBetween(BasicBlock *BB, BasicBlock *PredBB,
+                                    Instruction *StopAt,
+                                    ValueToValueMapTy &ValueMapping,
+                                    DominatorTree *DT = nullptr);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CLONING_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 0000000..63d3451
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,172 @@
+//===- Transform/Utils/CodeExtractor.h - Code extraction util ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include <limits>
+
+namespace llvm {
+
+class BasicBlock;
+class BlockFrequency;
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class DominatorTree;
+class Function;
+class Instruction;
+class Loop;
+class Module;
+class Type;
+class Value;
+
+  /// \brief Utility class for extracting code into a new function.
+  ///
+  /// This utility provides a simple interface for extracting some sequence of
+  /// code into its own function, replacing it with a call to that function. It
+  /// also provides various methods to query about the nature and result of
+  /// such a transformation.
+  ///
+  /// The rough algorithm used is:
+  /// 1) Find both the inputs and outputs for the extracted region.
+  /// 2) Pass the inputs as arguments, remapping them within the extracted
+  ///    function to arguments.
+  /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+  ///    as arguments, and inserting stores to the arguments for any scalars.
+  class CodeExtractor {
+    using ValueSet = SetVector<Value *>;
+
+    // Various bits of state computed on construction.
+    DominatorTree *const DT;
+    const bool AggregateArgs;
+    BlockFrequencyInfo *BFI;
+    BranchProbabilityInfo *BPI;
+
+    // If true, varargs functions can be extracted.
+    bool AllowVarArgs;
+
+    // Bits of intermediate state computed at various phases of extraction.
+    SetVector<BasicBlock *> Blocks;
+    unsigned NumExitBlocks = std::numeric_limits<unsigned>::max();
+    Type *RetTy;
+
+  public:
+    /// \brief Create a code extractor for a sequence of blocks.
+    ///
+    /// Given a sequence of basic blocks where the first block in the sequence
+    /// dominates the rest, prepare a code extractor object for pulling this
+    /// sequence out into its new function. When a DominatorTree is also given,
+    /// extra checking and transformations are enabled. If AllowVarArgs is true,
+    /// vararg functions can be extracted. This is safe if all vararg handling
+    /// code, including vastart, is extracted.
+    CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
+                  bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
+                  BranchProbabilityInfo *BPI = nullptr,
+                  bool AllowVarArgs = false);
+
+    /// \brief Create a code extractor for a loop body.
+    ///
+    /// Behaves just like the generic code sequence constructor, but uses the
+    /// block sequence of the loop.
+    CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
+                  BlockFrequencyInfo *BFI = nullptr,
+                  BranchProbabilityInfo *BPI = nullptr);
+
+    /// \brief Check to see if a block is valid for extraction.
+    ///
+    /// Blocks containing EHPads, allocas, and invokes are not valid. If
+    /// AllowVarArgs is true, blocks with vastart can be extracted. This is
+    /// safe if all vararg handling code, including vastart, is extracted.
+    static bool isBlockValidForExtraction(const BasicBlock &BB,
+                                          bool AllowVarArgs);
+
+    /// \brief Perform the extraction, returning the new function.
+    ///
+    /// Returns null when called on a CodeExtractor instance where isEligible
+    /// returns false.
+    Function *extractCodeRegion();
+
+    /// \brief Test whether this code extractor is eligible.
+    ///
+    /// Based on the blocks used when constructing the code extractor,
+    /// determine whether it is eligible for extraction.
+    bool isEligible() const { return !Blocks.empty(); }
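+
+    // Illustrative usage sketch (not part of the original header): outlining
+    // a single-block region, assuming `BB` is a `BasicBlock *` and `DT` a
+    // DominatorTree owned by the caller:
+    //
+    //   CodeExtractor CE(ArrayRef<BasicBlock *>(&BB, 1), &DT);
+    //   if (CE.isEligible())
+    //     if (Function *Outlined = CE.extractCodeRegion())
+    //       ; // The region is now a call to `Outlined` in the old function.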
+
+    /// \brief Compute the set of input values and output values for the code.
+    ///
+    /// These can be used either when performing the extraction or to evaluate
+    /// the expected size of a call to the extracted function. Note that this
+    /// work cannot be cached between the two as once we decide to extract
+    /// a code sequence, that sequence is modified, including changing these
+    /// sets, before extraction occurs. These modifications won't have any
+    /// significant impact on the cost however.
+    void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
+                           const ValueSet &Allocas) const;
+
+    /// Check if lifetime marker nodes can be hoisted/sunk into the outline
+    /// region.
+    ///
+    /// Returns true if it is safe to do the code motion.
+    bool isLegalToShrinkwrapLifetimeMarkers(Instruction *AllocaAddr) const;
+
+    /// Find the set of allocas whose live ranges are contained within the
+    /// outlined region.
+    ///
+    /// Allocas which have lifetime markers contained in the outlined region
+    /// should be pushed to the outlined function. The address bitcasts that
+    /// are used by the lifetime markers are also candidates for shrink-
+    /// wrapping. The instructions that need to be sunk are collected in
+    /// 'SinkCands'.
+    void findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
+                     BasicBlock *&ExitBlock) const;
+
+    /// Find or create a block within the outline region for placing hoisted
+    /// code.
+    ///
+    /// CommonExitBlock is a block outside the outline region. It is the common
+    /// successor of blocks inside the region. If there exists a single block
+    /// inside the region that is the predecessor of CommonExitBlock, that block
+    /// will be returned. Otherwise CommonExitBlock will be split and the
+    /// original block will be added to the outline region.
+    BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
+
+  private:
+    void severSplitPHINodes(BasicBlock *&Header);
+    void splitReturnBlocks();
+
+    Function *constructFunction(const ValueSet &inputs,
+                                const ValueSet &outputs,
+                                BasicBlock *header,
+                                BasicBlock *newRootNode, BasicBlock *newHeader,
+                                Function *oldFunction, Module *M);
+
+    void moveCodeToFunction(Function *newFunction);
+
+    void calculateNewCallTerminatorWeights(
+        BasicBlock *CodeReplacer,
+        DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
+        BranchProbabilityInfo *BPI);
+
+    void emitCallAndSwitchStatement(Function *newFunction,
+                                    BasicBlock *newHeader,
+                                    ValueSet &inputs,
+                                    ValueSet &outputs);
+  };
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 0000000..63e564d
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,32 @@
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true.  Return true if anything changed.
+bool optimizeGlobalCtorsList(Module &M,
+                             function_ref<bool(Function *)> ShouldRemove);
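+
+// Illustrative usage sketch (not part of the original header); the predicate
+// `isProvenSideEffectFree` is a hypothetical helper standing in for whatever
+// policy the caller implements:
+//
+//   bool Changed = optimizeGlobalCtorsList(M, [](Function *F) {
+//     return F && isProvenSideEffectFree(F); // Hypothetical predicate.
+//   });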
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h b/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
new file mode 100644
index 0000000..f50c5c9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
@@ -0,0 +1,36 @@
+//===- EntryExitInstrumenter.h - Function Entry/Exit Instrumentation ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
+// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
+// variants, intended to run pre- and post-inlining, respectively.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+#define LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct EntryExitInstrumenterPass
+    : public PassInfoMixin<EntryExitInstrumenterPass> {
+  EntryExitInstrumenterPass(bool PostInlining) : PostInlining(PostInlining) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  bool PostInlining;
+};
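+
+// Illustrative usage sketch (not part of the original header): scheduling both
+// variants in a new-pass-manager pipeline:
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/false));
+//   // ...inliner runs in between...
+//   FPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/true));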
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h b/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h
new file mode 100644
index 0000000..1256dfd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/EscapeEnumerator.h
@@ -0,0 +1,49 @@
+//===-- EscapeEnumerator.h --------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a helper class that enumerates all possible exits from a function,
+// including exception handling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+#define LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+/// EscapeEnumerator - This is a little algorithm to find all escape points
+/// from a function so that "finally"-style code can be inserted. In addition
+/// to finding the existing return and unwind instructions, it also (if
+/// necessary) transforms any call instructions into invokes and sends them to
+/// a landing pad.
+class EscapeEnumerator {
+  Function &F;
+  const char *CleanupBBName;
+
+  Function::iterator StateBB, StateE;
+  IRBuilder<> Builder;
+  bool Done;
+  bool HandleExceptions;
+
+public:
+  EscapeEnumerator(Function &F, const char *N = "cleanup",
+                   bool HandleExceptions = true)
+      : F(F), CleanupBBName(N), StateBB(F.begin()), StateE(F.end()),
+        Builder(F.getContext()), Done(false),
+        HandleExceptions(HandleExceptions) {}
+
+  IRBuilder<> *Next();
+};
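+
+// Illustrative usage sketch (not part of the original header): inserting a
+// call at every exit of `F` (the Function being instrumented), where
+// `FinalizeFn` is an assumed callee declared elsewhere:
+//
+//   EscapeEnumerator EE(F, "exit_cleanup");
+//   while (IRBuilder<> *AtExit = EE.Next())
+//     AtExit->CreateCall(FinalizeFn);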
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
new file mode 100644
index 0000000..0e987b9
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Evaluator.h
@@ -0,0 +1,120 @@
+//===- Evaluator.h - LLVM IR evaluator --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Function evaluator for LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+#define LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <deque>
+#include <memory>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class TargetLibraryInfo;
+
+/// This class evaluates LLVM IR, producing the Constant representing each SSA
+/// instruction.  Changes to global variables are stored in a mapping that can
+/// be iterated over after the evaluation is complete.  Once an evaluation call
+/// fails, the evaluation object should not be reused.
+class Evaluator {
+public:
+  Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
+      : DL(DL), TLI(TLI) {
+    ValueStack.emplace_back();
+  }
+
+  ~Evaluator() {
+    for (auto &Tmp : AllocaTmps)
+      // If there are still users of the alloca, the program is doing something
+      // silly, e.g. storing the address of the alloca somewhere and using it
+      // later.  Since this is undefined, we'll just make it be null.
+      if (!Tmp->use_empty())
+        Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
+  }
+
+  /// Evaluate a call to function F, returning true if successful, false if we
+  /// can't evaluate it.  ActualArgs contains the actual argument values for
+  /// the call.
+  bool EvaluateFunction(Function *F, Constant *&RetVal,
+                        const SmallVectorImpl<Constant*> &ActualArgs);
+
+  /// Evaluate all instructions in block BB, returning true if successful, false
+  /// if we can't evaluate it.  NextBB returns the next BB that control flows
+  /// into, or null upon return.
+  bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
+
+  Constant *getVal(Value *V) {
+    if (Constant *CV = dyn_cast<Constant>(V)) return CV;
+    Constant *R = ValueStack.back().lookup(V);
+    assert(R && "Reference to an uncomputed value!");
+    return R;
+  }
+
+  void setVal(Value *V, Constant *C) {
+    ValueStack.back()[V] = C;
+  }
+
+  const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
+    return MutatedMemory;
+  }
+
+  const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
+    return Invariants;
+  }
+
+private:
+  Constant *ComputeLoadResult(Constant *P);
+
+  /// As we compute SSA register values, we store their contents here. The back
+  /// of the deque contains the current function and the stack contains the
+  /// values in the calling frames.
+  std::deque<DenseMap<Value*, Constant*>> ValueStack;
+
+  /// This is used to detect recursion.  In pathological situations we could hit
+  /// exponential behavior, but at least there is nothing unbounded.
+  SmallVector<Function*, 4> CallStack;
+
+  /// For each store we execute, we update this map.  Loads check this to get
+  /// the most up-to-date value.  If evaluation is successful, this state is
+  /// committed to the process.
+  DenseMap<Constant*, Constant*> MutatedMemory;
+
+  /// To 'execute' an alloca, we create a temporary global variable to represent
+  /// its body.  This vector is needed so we can delete the temporary globals
+  /// when we are done.
+  SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
+
+  /// These global variables have been marked invariant by the static
+  /// constructor.
+  SmallPtrSet<GlobalVariable*, 8> Invariants;
+
+  /// These are constants we have checked and know to be simple enough to live
+  /// in a static initializer of a global.
+  SmallPtrSet<Constant*, 8> SimpleConstants;
+
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+};
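+
+// Illustrative usage sketch (not part of the original header): evaluating a
+// zero-argument function in the style of static-constructor evaluation,
+// assuming `DL`, `TLI`, and `F` come from the caller:
+//
+//   Evaluator Eval(DL, TLI);
+//   Constant *RetVal = nullptr;
+//   SmallVector<Constant *, 0> Args;
+//   if (Eval.EvaluateFunction(F, RetVal, Args))
+//     for (const auto &KV : Eval.getMutatedMemory())
+//       ; // Commit KV.first's new initializer KV.second.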
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_EVALUATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
new file mode 100644
index 0000000..7698a06
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -0,0 +1,393 @@
+//===- FunctionComparator.h - Function Comparator ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionComparator and GlobalNumberState classes which
+// are used by the MergeFunctions pass for comparing functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instructions.h" 
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <tuple>
+
+namespace llvm {
+
+class APFloat;
+class APInt;
+class BasicBlock;
+class Constant;
+class Function;
+class GlobalValue;
+class InlineAsm;
+class Instruction;
+class MDNode;
+class Type;
+class Value;
+
+/// GlobalNumberState assigns an integer to each global value in the program,
+/// which is used by the comparison routine to order references to globals. This
+/// state must be preserved throughout the pass, because Functions and other
+/// globals need to maintain their relative order. Globals are assigned a number
+/// when they are first visited. This order is deterministic, and so the
+/// assigned numbers are as well. When two functions are merged, neither number
+/// is updated. If the symbols are weak, this would be incorrect. If they are
+/// strong, then one will be replaced at all references to the other, and so
+/// direct callsites will now see one or the other symbol, and no update is
+/// necessary. Note that if we were guaranteed unique names, we could just
+/// compare those, but this would not work for stripped bitcodes or for those
+/// few symbols without a name.
+class GlobalNumberState {
+  struct Config : ValueMapConfig<GlobalValue *> {
+    enum { FollowRAUW = false };
+  };
+
+  // Each GlobalValue is mapped to an identifier. The Config ensures that when
+  // RAUW occurs, the mapping does not change. Tracking changes is unnecessary,
+  // and also problematic for weak symbols (which may be overwritten).
+  using ValueNumberMap = ValueMap<GlobalValue *, uint64_t, Config>;
+  ValueNumberMap GlobalNumbers;
+
+  // The next unused serial number to assign to a global.
+  uint64_t NextNumber = 0;
+
+public:
+  GlobalNumberState() = default;
+
+  uint64_t getNumber(GlobalValue* Global) {
+    ValueNumberMap::iterator MapIter;
+    bool Inserted;
+    std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
+    if (Inserted)
+      NextNumber++;
+    return MapIter->second;
+  }
+
+  void erase(GlobalValue *Global) {
+    GlobalNumbers.erase(Global);
+  }
+
+  void clear() {
+    GlobalNumbers.clear();
+  }
+};
+
+/// FunctionComparator - Compares two functions to determine whether or not
+/// they will generate machine code with the same behaviour. DataLayout is
+/// used if available. The comparator always fails conservatively (erring on the
+/// side of claiming that two functions are different).
+class FunctionComparator {
+public:
+  FunctionComparator(const Function *F1, const Function *F2,
+                     GlobalNumberState* GN)
+      : FnL(F1), FnR(F2), GlobalNumbers(GN) {}
+
+  /// Test whether the two functions have equivalent behaviour; returns 0 if
+  /// they do, and a non-zero ordering value otherwise.
+  int compare();
+
+  /// Hash a function. Equivalent functions will have the same hash, and unequal
+  /// functions will have different hashes with high probability.
+  using FunctionHash = uint64_t;
+  static FunctionHash functionHash(Function &);
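+
+  // Illustrative usage sketch (not part of the original header): the cheap
+  // hash is typically used to bucket candidates before the full comparison:
+  //
+  //   GlobalNumberState GN;
+  //   if (FunctionComparator::functionHash(*F1) ==
+  //           FunctionComparator::functionHash(*F2) &&
+  //       FunctionComparator(F1, F2, &GN).compare() == 0)
+  //     ; // F1 and F2 are equivalent and may be merged.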
+
+protected:
+  /// Start the comparison.
+  void beginCompare() {
+    sn_mapL.clear();
+    sn_mapR.clear();
+  }
+
+  /// Compares the signature and other general attributes of the two functions.
+  int compareSignature() const;
+
+  /// Test whether two basic blocks have equivalent behaviour.
+  int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR) const;
+
+  /// Constants comparison.
+  /// It is analogous to lexicographical comparison between hypothetical
+  /// numbers of the following format:
+  /// <bitcastability-trait><raw-bit-contents>
+  ///
+  /// 1. Bitcastability.
+  /// Check whether L's type could be losslessly bitcasted to R's type.
+  /// At this stage, when a lossless bitcast is not possible, the method
+  /// returns -1 or 1, thereby also defining which type is greater in the
+  /// context of bitcastability.
+  /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
+  ///          to the contents comparison.
+  ///          If types differ, remember the type comparison result and check
+  ///          whether we can still bitcast the types.
+  /// Stage 1: Types that satisfy the isFirstClassType condition are always
+  ///          greater than others.
+  /// Stage 2: Vectors are greater than non-vectors.
+  ///          If both types are vectors, the vector with the greater bitwidth
+  ///          is greater.
+  ///          If both types are vectors with the same bitwidth, the types
+  ///          are bitcastable, so we can skip the other stages and go to the
+  ///          contents comparison.
+  /// Stage 3: Pointer types are greater than non-pointers. If both types are
+  ///          pointers of the same address space, go to the contents
+  ///          comparison. For different address spaces, the pointer with the
+  ///          greater address space is greater.
+  /// Stage 4: The types are neither vectors nor pointers, and they differ.
+  ///          We don't know how to bitcast them, so we don't, and instead
+  ///          return the type comparison result (which then determines the
+  ///          relationship among constants we don't know how to bitcast).
+  ///
+  /// For clarity, here is how the set of constants could look on a
+  /// single-dimensional axis:
+  ///
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  /// Where: NFCT - Not a FirstClassType
+  ///        FCT - FirstClassType
+  ///
+  /// 2. Compare raw contents.
+  /// This stage ignores types and only compares the bits of L and R.
+  /// Returns 0 if L and R have equivalent contents, and
+  /// -1 or 1 if the values are different.
+  /// Pretty trivial:
+  /// 2.1. If the contents are numbers, compare the numbers.
+  ///    Ints with greater bitwidth are greater. Ints with the same bitwidth
+  ///    are compared by their contents.
+  /// 2.2. "And so on". To avoid discrepancies with the comments, it is
+  /// perhaps better to read the implementation itself.
+  /// 3. Back to the overall picture: let's look again at how the ordered set
+  /// of constants looks:
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  ///
+  /// Now look, what could be inside [FCT, "others"], for example:
+  /// [FCT, "others"] =
+  /// [
+  ///   [double 0.1], [double 1.23],
+  ///   [i32 1], [i32 2],
+  ///   { double 1.0 },       ; StructTyID, NumElements = 1
+  ///   { i32 1 },            ; StructTyID, NumElements = 1
+  ///   { double 1, i32 1 },  ; StructTyID, NumElements = 2
+  ///   { i32 1, double 1 }   ; StructTyID, NumElements = 2
+  /// ]
+  ///
+  /// Let's explain the order. Floating-point numbers come before integers,
+  /// simply because of the cmpTypes terms: FloatTyID < IntegerTyID.
+  /// Floats (with the same fltSemantics) are sorted according to their value.
+  /// Then come the integers, which, like the floats,
+  /// are easily sorted among each other.
+  /// Structures are grouped at the tail, again because of their
+  /// TypeID: StructTyID > IntegerTyID > FloatTyID.
+  /// Structures with a greater number of elements are greater. Among
+  /// structures of equal size, the one whose greater elements come first is
+  /// greater. The same logic applies to vectors, arrays, and other complex
+  /// types.
+  ///
+  /// Bitcastable constants.
+  /// Let's assume that some constant belongs to a group of
+  /// "so-called-equal" values with different types, and at the same time
+  /// belongs to another group of constants with equal types
+  /// and "really" equal values.
+  ///
+  /// Now, let's prove that this is impossible:
+  ///
+  /// If constant A with type TyA is bitcastable to B with type TyB, then:
+  /// 1. All constants with types equal to TyA are bitcastable to B, since
+  ///    those types must be vectors (if TyA is a vector), pointers
+  ///    (if TyA is a pointer), or otherwise (if TyA equals TyB) equal
+  ///    to TyB.
+  /// 2. All constants with non-equal but bitcastable types to TyA are
+  ///    bitcastable to B.
+  ///    Once again, this is because we allow it only for vectors and pointers.
+  ///    This statement can be expanded as follows:
+  /// 2.1. All vectors with a bitwidth equal to vector A's have a bitwidth
+  ///      equal to vector B's, and are thus bitcastable to B as well.
+  /// 2.2. All pointers of the same address space, no matter what they point
+  ///      to, are bitcastable. So if C is a pointer, it can be bitcast to A
+  ///      and to B.
+  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
+  /// QED.
+  ///
+  /// In other words, for pointers and vectors, we ignore the top-level type
+  /// and look at their particular properties (bitwidth for vectors, and
+  /// address space for pointers).
+  /// If these properties are equal, compare their contents.
+  int cmpConstants(const Constant *L, const Constant *R) const;
+
+  /// Compares two global values by number. Uses the GlobalNumberState to
+  /// identify the same globals across functions.
+  int cmpGlobalValues(GlobalValue *L, GlobalValue *R) const;
+
+  /// Assign or look up previously assigned numbers for the two values, and
+  /// return whether the numbers are equal. Numbers are assigned in the order
+  /// visited.
+  /// Comparison order:
+  /// Stage 0: A value that is the function itself is always greater than
+  ///          others. If the left and right values are references to their
+  ///          respective functions, they are equal.
+  /// Stage 1: Constants are greater than non-constants.
+  ///          If both left and right are constants, the result of
+  ///          cmpConstants is used as the cmpValues result.
+  /// Stage 2: InlineAsm instances are greater than others. If both left and
+  ///          right are InlineAsm instances, the InlineAsm* pointers are cast
+  ///          to integers and compared as numbers.
+  /// Stage 3: In all other cases we compare the order in which we meet these
+  ///          values in their functions. If the right value was met first
+  ///          during scanning, the left value is greater.
+  ///          In other words, we compare serial numbers; for more details
+  ///          see the comments for sn_mapL and sn_mapR.
+  int cmpValues(const Value *L, const Value *R) const;
+
+  /// Compare two Instructions for equivalence, similar to
+  /// Instruction::isSameOperationAs.
+  ///
+  /// Stages are listed in "most significant stage first" order:
+  /// On each stage below, we do comparison between some left and right
+  /// operation parts. If parts are non-equal, we assign parts comparison
+  /// result to the operation comparison result and exit from method.
+  /// Otherwise we proceed to the next stage.
+  /// Stages:
+  /// 1. Operation opcodes. Compared as numbers.
+  /// 2. Number of operands.
+  /// 3. Operation types. Compared with the cmpTypes method.
+  /// 4. Compare operation subclass optional data as a stream of bytes:
+  /// just convert it to integers and call cmpNumbers.
+  /// 5. Compare operand types with cmpTypes, in
+  /// most-significant-operand-first order.
+  /// 6. Last stage. Check operations for some specific attributes.
+  /// For example, for a Load it would be:
+  /// 6.1. Load: volatile (as a boolean flag)
+  /// 6.2. Load: alignment (as integer numbers)
+  /// 6.3. Load: ordering (as the underlying enum class value)
+  /// 6.4. Load: synch-scope (as integer numbers)
+  /// 6.5. Load: range metadata (as integer ranges)
+  /// At this stage it is better to look at the code, since it is no more than
+  /// 10-15 lines per instruction and could change from time to time.
+  ///
+  /// Sets \p needToCmpOperands to true if the operands of the instructions
+  /// still must be compared afterwards. In this case it's already guaranteed
+  /// that both instructions have the same number of operands.
+  int cmpOperations(const Instruction *L, const Instruction *R,
+                    bool &needToCmpOperands) const;
+
+  /// cmpTypes - compares two types and
+  /// defines a total ordering over the set of types.
+  ///
+  /// Return values:
+  /// 0 if types are equal,
+  /// -1 if Left is less than Right,
+  /// +1 if Left is greater than Right.
+  ///
+  /// Description:
+  /// The comparison is broken into stages. As in lexicographical comparison,
+  /// an earlier stage has higher priority.
+  /// Keep the total-ordering properties in mind at each stage.
+  ///
+  /// 0. Before comparison we coerce pointer types of address space 0 to
+  /// integer.
+  /// We also don't bother when the left and right types are the same, and
+  /// just return 0 in this case.
+  ///
+  /// 1. If types are of different kind (different type IDs).
+  ///    Return result of type IDs comparison, treating them as numbers.
+  /// 2. If types are integers, check that they have the same width. If they
+  /// are vectors, check that they have the same count and subtype.
+  /// 3. Types have the same ID, so check whether they are one of:
+  /// * Void
+  /// * Float
+  /// * Double
+  /// * X86_FP80
+  /// * FP128
+  /// * PPC_FP128
+  /// * Label
+  /// * Metadata
+  /// We can treat these types as equal whenever their IDs are same.
+  /// 4. If Left and Right are pointers, return result of address space
+  /// comparison (numbers comparison). We can treat pointer types of same
+  /// address space as equal.
+  /// 5. If the types are complex,
+  /// both Left and Right are expanded and their element types are
+  /// checked in the same way. If we get Res != 0 at some stage, return it.
+  /// Otherwise return 0.
+  /// 6. All other cases are llvm_unreachable.
+  int cmpTypes(Type *TyL, Type *TyR) const;
+
+  int cmpNumbers(uint64_t L, uint64_t R) const;
+  int cmpAPInts(const APInt &L, const APInt &R) const;
+  int cmpAPFloats(const APFloat &L, const APFloat &R) const;
+  int cmpMem(StringRef L, StringRef R) const;
+
+  // The two functions undergoing comparison.
+  const Function *FnL, *FnR;
+
+private:
+  int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
+  int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
+  int cmpAttrs(const AttributeList L, const AttributeList R) const;
+  int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;
+  int cmpOperandBundlesSchema(const Instruction *L, const Instruction *R) const;
+
+  /// Compare two GEPs for equivalent pointer arithmetic.
+  /// Parts to be compared for each comparison stage,
+  /// most significant stage first:
+  /// 1. Address space. As numbers.
+  /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
+  /// 3. Pointer operand type (using cmpType method).
+  /// 4. Number of operands.
+  /// 5. Compare operands, using cmpValues method.
+  int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) const;
+  int cmpGEPs(const GetElementPtrInst *GEPL,
+              const GetElementPtrInst *GEPR) const {
+    return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
+  }
+
+  /// Assign serial numbers to values from left function, and values from
+  /// right function.
+  /// Explanation:
+  /// While comparing functions, we need to compare the values we meet on the
+  /// left and right sides.
+  /// It is easy to sort things out for external values: it just has to be
+  /// the same value on the left and the right.
+  /// But for local values (those introduced inside the function body)
+  /// we have to ensure they were introduced at exactly the same place
+  /// and play the same role.
+  /// So let's assign a serial number to each value when we meet it for the
+  /// first time. Values that were met at the same place get the same serial
+  /// numbers. It is worth explaining a few points about values assigned
+  /// to BBs and other implementation choices (see below).
+  ///
+  /// 1. Safety of BB reordering.
+  /// It's safe to change the order of BasicBlocks in function.
+  /// Relationship with other functions and serial numbering will not be
+  /// changed in this case.
+  /// As follows from FunctionComparator::compare(), we do a CFG walk: we start
+  /// from the entry and then take each terminator. So it doesn't matter how
+  /// the BBs are actually ordered in the function. And since cmpValues is
+  /// called during this walk, the numbering depends only on how the BBs are
+  /// located inside the CFG. So the answer is yes: we will get the same
+  /// numbering.
+  ///
+  /// 2. Impossibility to use dominance properties of values.
+  /// If we compare two instruction operands, where the first is a use of
+  /// local variable AL from function FL and the second is a use of local
+  /// variable AR from FR, we could compare their origins and check whether
+  /// they are defined at the same place.
+  /// But we are still not able to compare the operands of PHI nodes, since
+  /// those could be operands from later BBs we haven't scanned yet.
+  /// So it's impossible to use dominance properties in general.
+  mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;
+
+  // The global state we will use
+  GlobalNumberState* GlobalNumbers;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
new file mode 100644
index 0000000..b9fbef0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -0,0 +1,119 @@
+//===- FunctionImportUtils.h - Importing support utilities -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionImportGlobalProcessing class which is used
+// to perform the necessary global value handling for function importing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+
+namespace llvm {
+class Module;
+
+/// Class to handle necessary GlobalValue changes required by ThinLTO
+/// function importing, including linkage changes and any necessary renaming.
+class FunctionImportGlobalProcessing {
+  /// The Module which we are exporting or importing functions from.
+  Module &M;
+
+  /// Module summary index passed in for function importing/exporting handling.
+  const ModuleSummaryIndex &ImportIndex;
+
+  /// Globals to import from this module; all other functions will be
+  /// imported as declarations instead of definitions.
+  SetVector<GlobalValue *> *GlobalsToImport;
+
+  /// Set to true if the given ModuleSummaryIndex contains any functions
+  /// from this source module, in which case we must conservatively assume
+  /// that any of its functions may be imported into another module
+  /// as part of a different backend compilation process.
+  bool HasExportedFunctions = false;
+
+  /// Set of llvm.*used values, in order to validate that we don't try
+  /// to promote any non-renamable values.
+  SmallPtrSet<GlobalValue *, 8> Used;
+
+  /// Check if we should promote the given local value to global scope.
+  bool shouldPromoteLocalToGlobal(const GlobalValue *SGV);
+
+#ifndef NDEBUG
+  /// Check if the given value is a local that can't be renamed (promoted).
+  /// Only used in assertion checking, and disabled under NDEBUG since the Used
+  /// set will not be populated.
+  bool isNonRenamableLocal(const GlobalValue &GV) const;
+#endif
+
+  /// Helper methods to check if we are importing from or potentially
+  /// exporting from the current source module.
+  bool isPerformingImport() const { return GlobalsToImport != nullptr; }
+  bool isModuleExporting() const { return HasExportedFunctions; }
+
+  /// If we are importing from the source module, checks if we should
+  /// import SGV as a definition, otherwise import as a declaration.
+  bool doImportAsDefinition(const GlobalValue *SGV);
+
+  /// Get the name for SGV that should be used in the linked destination
+  /// module. Specifically, this handles the case where we need to rename
+  /// a local that is being promoted to global scope, which it will always
+  /// do when \p DoPromote is true (or when importing a local).
+  std::string getName(const GlobalValue *SGV, bool DoPromote);
+
+  /// Process globals so that they can be used in ThinLTO. This includes
+  /// promoting local variables so that they can be referenced externally by
+  /// ThinLTO-imported globals, and converting strong external globals to
+  /// available_externally.
+  void processGlobalsForThinLTO();
+  void processGlobalForThinLTO(GlobalValue &GV);
+
+  /// Get the new linkage for SGV that should be used in the linked destination
+  /// module. Specifically, for ThinLTO importing or exporting it may need
+  /// to be adjusted. When \p DoPromote is true then we must adjust the
+  /// linkage for a required promotion of a local to global scope.
+  GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);
+
+public:
+  FunctionImportGlobalProcessing(
+      Module &M, const ModuleSummaryIndex &Index,
+      SetVector<GlobalValue *> *GlobalsToImport = nullptr)
+      : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport) {
+    // If we have a ModuleSummaryIndex but no function to import,
+    // then this is the primary module being compiled in a ThinLTO
+    // backend compilation, and we need to see if it has functions that
+    // may be exported to another backend compilation.
+    if (!GlobalsToImport)
+      HasExportedFunctions = ImportIndex.hasExportedFunctions(M);
+
+#ifndef NDEBUG
+    // First collect those in the llvm.used set.
+    collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
+    // Next collect those in the llvm.compiler.used set.
+    collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
+#endif
+  }
+
+  bool run();
+
+  static bool doImportAsDefinition(const GlobalValue *SGV,
+                                   SetVector<GlobalValue *> *GlobalsToImport);
+};
+
+/// Perform in-place global value handling on the given Module, renaming and
+/// promoting exported local functions for ThinLTO.
+bool renameModuleForThinLTO(
+    Module &M, const ModuleSummaryIndex &Index,
+    SetVector<GlobalValue *> *GlobalsToImport = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h b/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h
new file mode 100644
index 0000000..8cc265b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -0,0 +1,85 @@
+//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#include "llvm/Support/AtomicOrdering.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class Value;
+
+/// It is safe to destroy a constant iff it is only used by other constants.
+/// Note that constants cannot be cyclic, so this test is pretty easy to
+/// implement recursively.
+///
+bool isSafeToDestroyConstant(const Constant *C);
+
+/// As we analyze each global, keep track of some information about it.  If we
+/// find out that the address of the global is taken, none of this info will be
+/// accurate.
+struct GlobalStatus {
+  /// True if the global's address is used in a comparison.
+  bool IsCompared = false;
+
+  /// True if the global is ever loaded.  If the global isn't ever loaded it
+  /// can be deleted.
+  bool IsLoaded = false;
+
+  /// Keep track of what stores to the global look like.
+  enum StoredType {
+    /// There is no store to this global.  It can thus be marked constant.
+    NotStored,
+
+    /// This global is stored to, but the only thing stored is the constant it
+    /// was initialized with. This is only tracked for scalar globals.
+    InitializerStored,
+
+    /// This global is stored to, but only its initializer and one other value
+    /// is ever stored to it.  If this global isStoredOnce, we track the value
+    /// stored to it in StoredOnceValue below.  This is only tracked for scalar
+    /// globals.
+    StoredOnce,
+
+    /// This global is stored to by multiple values or something else that we
+    /// cannot track.
+    Stored
+  } StoredType = NotStored;
+
+  /// If only one value (besides the initializer constant) is ever stored to
+  /// this global, keep track of what value it is.
+  Value *StoredOnceValue = nullptr;
+
+  /// These start out null/false.  When the first accessing function is noticed,
+  /// it is recorded. When a second different accessing function is noticed,
+  /// HasMultipleAccessingFunctions is set to true.
+  const Function *AccessingFunction = nullptr;
+  bool HasMultipleAccessingFunctions = false;
+
+  /// Set to true if this global has a user that is not an instruction (e.g. a
+  /// constant expr or GV initializer).
+  bool HasNonInstructionUser = false;
+
+  /// Set to the strongest atomic ordering requirement.
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+
+  GlobalStatus();
+
+  /// Look at all uses of the global and fill in the GlobalStatus structure.  If
+  /// the global has its address taken, return true to indicate we can't do
+  /// anything with it.
+  static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
+};
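+
+// Illustrative usage sketch (not part of the original header): checking a
+// global before treating it as effectively constant:
+//
+//   GlobalStatus GS;
+//   if (!GlobalStatus::analyzeGlobal(GV, GS) &&
+//       GS.StoredType == GlobalStatus::InitializerStored)
+//     ; // GV's address is not taken and only its initializer was stored.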
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h b/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
new file mode 100644
index 0000000..b7a3d13
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h
@@ -0,0 +1,107 @@
+//===-- ImportedFunctionsInliningStats.h ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Generating inliner statistics for imported functions, mostly useful for
+// ThinLTO.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Module;
+class Function;
+/// \brief Calculate and dump ThinLTO specific inliner stats.
+/// The main statistics are:
+/// (1) Number of inlined imported functions,
+/// (2) Number of imported functions inlined into importing module (indirect),
+/// (3) Number of non-imported functions inlined into the importing module
+/// (indirect).
+/// The difference between the first and the second is that the first stat
+/// counts all performed inlines of imported functions, while the second
+/// counts only the functions that have eventually been inlined into a
+/// function in the importing module (by a chain of inlines). Because LLVM
+/// uses a bottom-up inliner, it is possible to e.g. import functions `A` and
+/// `B`, then inline `B` into `A`, after which `A` might be too big to be
+/// inlined into some other function that calls it. The second statistic is
+/// calculated by building a graph whose nodes are functions and whose edges
+/// are performed inlines, and then marking the edges reachable from
+/// non-imported functions.
+///
+/// If `Verbose` is set to true, then it also dumps statistics
+/// for each inlined function, sorted by the greatest inline count:
+/// - number of performed inlines
+/// - number of performed inlines into the importing module
+class ImportedFunctionsInliningStatistics {
+private:
+  /// InlineGraphNode represents a node in the graph of inlined functions.
+  struct InlineGraphNode {
+    // Default-constructible and movable.
+    InlineGraphNode() = default;
+    InlineGraphNode(InlineGraphNode &&) = default;
+    InlineGraphNode &operator=(InlineGraphNode &&) = default;
+
+    llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
+    /// Incremented every direct inline.
+    int32_t NumberOfInlines = 0;
+    /// Number of inlines into a non-imported function (possibly indirect via
+    /// intermediate inlines). Computed based on a graph search.
+    int32_t NumberOfRealInlines = 0;
+    bool Imported = false;
+    bool Visited = false;
+  };
+
+public:
+  ImportedFunctionsInliningStatistics() = default;
+  ImportedFunctionsInliningStatistics(
+      const ImportedFunctionsInliningStatistics &) = delete;
+
+  /// Set information like AllFunctions, ImportedFunctions, ModuleName.
+  void setModuleInfo(const Module &M);
+  /// Record inline of @param Callee into @param Caller for statistics.
+  void recordInline(const Function &Caller, const Function &Callee);
+  /// Dump stats computed with InlinerStatistics class.
+  /// If @param Verbose is true then separate statistics for every inlined
+  /// function will be printed.
+  void dump(bool Verbose);
+
+private:
+  /// Creates a new node in NodesMap and sets its attributes, or returns the
+  /// existing one.
+  InlineGraphNode &createInlineGraphNode(const Function &);
+  void calculateRealInlines();
+  void dfs(InlineGraphNode &GraphNode);
+
+  using NodesMapTy = llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
+  using SortedNodesTy = std::vector<const NodesMapTy::MapEntryTy *>;
+  /// Returns vector of elements sorted by
+  /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
+  SortedNodesTy getSortedNodes();
+
+private:
+  /// This map manages the lifetime of all InlineGraphNodes. A unique pointer
+  /// to InlineGraphNode is used since the node pointers are also saved in the
+  /// InlinedCallees vector. If it stored InlineGraphNode by value instead, the
+  /// addresses of the nodes would not be stable.
+  NodesMapTy NodesMap;
+  /// Non-external functions that have some other function inlined inside.
+  std::vector<StringRef> NonImportedCallers;
+  int AllFunctions = 0;
+  int ImportedFunctions = 0;
+  StringRef ModuleName;
+};
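+
+// Illustrative usage sketch (not part of the original header): how an inliner
+// pass might drive this class over one module:
+//
+//   ImportedFunctionsInliningStatistics Stats;
+//   Stats.setModuleInfo(M);
+//   // ...after each successful inline of Callee into Caller:
+//   Stats.recordInline(Caller, Callee);
+//   // ...once at the end of the pass:
+//   Stats.dump(/*Verbose=*/true);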
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h b/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 0000000..0ec3321
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,73 @@
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32bit and 64bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+  class BinaryOperator;
+}
+
+namespace llvm {
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. This currently generates code using the udiv
+  /// expansion, but future work includes generating more specialized code,
+  /// e.g. when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainder(BinaryOperator *Rem);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. This currently generates code similarly to compiler-rt's
+  /// implementations, but future work includes generating more specialized code
+  /// when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+  ///
+  /// @brief Replace Div with generated code.
+  bool expandDivision(BinaryOperator* Div);
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 32-bit Rem, which
+  /// makes it useful for targets with little or no support for arithmetic
+  /// narrower than 32 bits.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainderUpTo32Bits(BinaryOperator *Rem);
+
+  /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 64-bit Rem.
+  ///
+  /// @brief Replace Rem with generated code.
+  bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 32-bit Div, which makes it useful for
+  /// targets with little or no support for arithmetic narrower than 32 bits.
+  ///
+  /// @brief Replace Div with generated code.
+  bool expandDivisionUpTo32Bits(BinaryOperator *Div);
+
+  /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 64-bit Div.
+  ///
+  /// @brief Replace Div with generated code.
+  bool expandDivisionUpTo64Bits(BinaryOperator *Div);
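+
+  // Illustrative usage sketch (not part of the original header): expanding a
+  // udiv instruction in place, assuming `I` is an `Instruction *`:
+  //
+  //   if (auto *Div = dyn_cast<BinaryOperator>(I))
+  //     if (Div->getOpcode() == Instruction::UDiv)
+  //       expandDivision(Div); // Div is replaced by the expanded sequence.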
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h b/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h
new file mode 100644
index 0000000..fe717e5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LCSSA.h
@@ -0,0 +1,44 @@
+//===- LCSSA.h - Loop-closed SSA transform Pass -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass transforms loops by placing phi nodes at the end of the loops for
+// all values that are live across the loop boundary.  For example, it turns
+// the left into the right code:
+//
+// for (...)                for (...)
+//   if (c)                   if (c)
+//     X1 = ...                 X1 = ...
+//   else                     else
+//     X2 = ...                 X2 = ...
+//   X3 = phi(X1, X2)         X3 = phi(X1, X2)
+// ... = X3 + 4             X4 = phi(X3)
+//                          ... = X4 + 4
+//
+// This is still valid LLVM; the extra phi nodes are purely redundant, and will
+// be trivially eliminated by InstCombine.  The major benefit of this
+// transformation is that it makes many other loop optimizations, such as
+// LoopUnswitching, simpler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LCSSA_H
+#define LLVM_TRANSFORMS_UTILS_LCSSA_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Converts loops into loop-closed SSA form.
+class LCSSAPass : public PassInfoMixin<LCSSAPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LCSSA_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h b/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
new file mode 100644
index 0000000..c9df532
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
@@ -0,0 +1,27 @@
+//===- LibCallsShrinkWrap.h - Shrink Wrap Library Calls -------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+#define LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LibCallsShrinkWrapPass : public PassInfoMixin<LibCallsShrinkWrapPass> {
+public:
+  static StringRef name() { return "LibCallsShrinkWrapPass"; }
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h
new file mode 100644
index 0000000..ea4d2cd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopRotationUtils.h
@@ -0,0 +1,35 @@
+//===- LoopRotationUtils.h - Utilities to perform loop rotation -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utilities to convert a loop into a loop with bottom test.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class ScalarEvolution;
+struct SimplifyQuery;
+class TargetTransformInfo;
+
+/// \brief Convert a loop into a loop with bottom test.
+bool LoopRotation(Loop *L, unsigned MaxHeaderSize, LoopInfo *LI,
+                  const TargetTransformInfo *TTI, AssumptionCache *AC,
+                  DominatorTree *DT, ScalarEvolution *SE,
+                  const SimplifyQuery &SQ);
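+
+// Illustrative usage sketch (not part of the original header): rotating a loop
+// under a header-size budget, assuming the analyses were computed by the
+// caller and `DL` is the module's DataLayout:
+//
+//   SimplifyQuery SQ(DL);
+//   bool Rotated = LoopRotation(L, /*MaxHeaderSize=*/16, LI, TTI, AC, DT, SE,
+//                               SQ);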
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
new file mode 100644
index 0000000..f3828bc
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopSimplify.h
@@ -0,0 +1,65 @@
+//===- LoopSimplify.h - Loop Canonicalization Pass --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs several transformations to transform natural loops into a
+// simpler form, which makes subsequent analyses and transformations simpler and
+// more effective.
+//
+// Loop pre-header insertion guarantees that there is a single, non-critical
+// entry edge from outside of the loop to the loop header.  This simplifies a
+// number of analyses and transformations, such as LICM.
+//
+// Loop exit-block insertion guarantees that all exit blocks from the loop
+// (blocks which are outside of the loop that have predecessors inside of the
+// loop) only have predecessors from inside of the loop (and are thus dominated
+// by the loop header).  This simplifies transformations such as store-sinking
+// that are built into LICM.
+//
+// This pass also guarantees that loops will have exactly one backedge.
+//
+// Indirectbr instructions introduce several complications. If the loop
+// contains or is entered by an indirectbr instruction, it may not be possible
+// to transform the loop and make these guarantees. Client code should check
+// that these conditions are true before relying on them.
+//
+// Note that the simplifycfg pass will clean up blocks which are split out but
+// end up being unnecessary, so usage of this pass should not pessimize
+// generated code.
+//
+// This pass obviously modifies the CFG, but updates loop information and
+// dominator information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+#define LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// This pass is responsible for loop canonicalization.
+class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Simplify each loop in a loop nest recursively.
+///
+/// This takes a potentially un-simplified loop L (and its children) and turns
+/// it into a simplified loop nest with preheaders and single backedges. It will
+/// update \c AliasAnalysis and \c ScalarEvolution analyses if they're non-null.
+bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
+                  AssumptionCache *AC, bool PreserveLCSSA);
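+
+// A hedged usage sketch (illustrative only): with the usual analyses in hand,
+// a transform can canonicalize a loop nest before operating on it.
+//
+//   bool Changed = simplifyLoop(L, &DT, &LI, &SE, &AC,
+//                               /*PreserveLCSSA=*/false);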
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
new file mode 100644
index 0000000..131a4b0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopUtils.h
@@ -0,0 +1,546 @@
+//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop transformation utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DemandedBits.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+class AliasSet;
+class AliasSetTracker;
+class BasicBlock;
+class DataLayout;
+class Loop;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PredicatedScalarEvolution;
+class PredIteratorCache;
+class ScalarEvolution;
+class SCEV;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The RecurrenceDescriptor is used to identify recurrence variables in a
+/// loop. Reduction is a special case of recurrence that has uses of the
+/// recurrence variable outside the loop. The method isReductionPHI identifies
+/// reductions that are basic recurrences.
+///
+/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
+/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
+/// array[i]; } is a summation of array elements. Basic recurrences are a
+/// special case of chains of recurrences (CR). See ScalarEvolution for CR
+/// references.
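+///
+/// A hedged usage sketch (illustrative only), assuming \p Phi is a header phi
+/// of \p TheLoop:
+///
+///   RecurrenceDescriptor RedDes;
+///   if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes))
+///     ... // Phi is a reduction, e.g. an integer add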
+
+/// This struct holds information about recurrence variables.
+class RecurrenceDescriptor {
+public:
+  /// This enum represents the kinds of recurrences that we support.
+  enum RecurrenceKind {
+    RK_NoRecurrence,  ///< Not a recurrence.
+    RK_IntegerAdd,    ///< Sum of integers.
+    RK_IntegerMult,   ///< Product of integers.
+    RK_IntegerOr,     ///< Bitwise or logical OR of numbers.
+    RK_IntegerAnd,    ///< Bitwise or logical AND of numbers.
+    RK_IntegerXor,    ///< Bitwise or logical XOR of numbers.
+    RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
+    RK_FloatAdd,      ///< Sum of floats.
+    RK_FloatMult,     ///< Product of floats.
+    RK_FloatMinMax    ///< Min/max implemented in terms of select(cmp()).
+  };
+
+  // This enum represents the kind of minmax recurrence.
+  enum MinMaxRecurrenceKind {
+    MRK_Invalid,
+    MRK_UIntMin,
+    MRK_UIntMax,
+    MRK_SIntMin,
+    MRK_SIntMax,
+    MRK_FloatMin,
+    MRK_FloatMax
+  };
+
+  RecurrenceDescriptor() = default;
+
+  RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
+                       MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
+                       bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+      : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
+        UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+    CastInsts.insert(CI.begin(), CI.end());
+  }
+
+  /// This POD struct holds information about a potential recurrence operation.
+  class InstDesc {
+  public:
+    InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
+        : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
+          UnsafeAlgebraInst(UAI) {}
+
+    InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
+        : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
+          UnsafeAlgebraInst(UAI) {}
+
+    bool isRecurrence() { return IsRecurrence; }
+
+    bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+    Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+    MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
+
+    Instruction *getPatternInst() { return PatternLastInst; }
+
+  private:
+    // Is this instruction a recurrence candidate.
+    bool IsRecurrence;
+    // The last instruction in a min/max pattern (select of the select(icmp())
+    // pattern), or the current recurrence instruction otherwise.
+    Instruction *PatternLastInst;
+    // If this is a min/max pattern the comparison predicate.
+    MinMaxRecurrenceKind MinMaxKind;
+    // Recurrence has unsafe algebra.
+    Instruction *UnsafeAlgebraInst;
+  };
+
+  /// Returns a struct describing if the instruction 'I' can be a recurrence
+  /// variable of type 'Kind'. If the recurrence is a min/max pattern of
+  /// select(icmp()) this function advances the instruction pointer 'I' from the
+  /// compare instruction to the select instruction and stores this pointer in
+  /// 'PatternLastInst' member of the returned struct.
+  static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+                                    InstDesc &Prev, bool HasFunNoNaNAttr);
+
+  /// Returns true if instruction I has multiple uses in Insts
+  static bool hasMultipleUsesOf(Instruction *I,
+                                SmallPtrSetImpl<Instruction *> &Insts);
+
+  /// Returns true if all uses of the instruction I are within the Set.
+  static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+
+  /// Returns a struct describing whether the instruction is a
+  /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
+  /// or max(X, Y).
+  static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
+
+  /// Returns identity corresponding to the RecurrenceKind.
+  static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
+
+  /// Returns the opcode of binary operation corresponding to the
+  /// RecurrenceKind.
+  static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
+
+  /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+  static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK,
+                               Value *Left, Value *Right);
+
+  /// Returns true if Phi is a reduction of type Kind and adds it to the
+  /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
+  /// non-null, the minimal bit width needed to compute the reduction will be
+  /// computed.
+  static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
+                              bool HasFunNoNaNAttr,
+                              RecurrenceDescriptor &RedDes,
+                              DemandedBits *DB = nullptr,
+                              AssumptionCache *AC = nullptr,
+                              DominatorTree *DT = nullptr);
+
+  /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor
+  /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
+  /// non-null, the minimal bit width needed to compute the reduction will be
+  /// computed.
+  static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
+                             RecurrenceDescriptor &RedDes,
+                             DemandedBits *DB = nullptr,
+                             AssumptionCache *AC = nullptr,
+                             DominatorTree *DT = nullptr);
+
+  /// Returns true if Phi is a first-order recurrence. A first-order recurrence
+  /// is a non-reduction recurrence relation in which the value of the
+  /// recurrence in the current loop iteration equals a value defined in the
+  /// previous iteration. \p SinkAfter includes pairs of instructions where the
+  /// first will be rescheduled to appear after the second if/when the loop is
+  /// vectorized. It may be augmented with additional pairs if needed in order
+  /// to handle Phi as a first-order recurrence.
+  static bool
+  isFirstOrderRecurrence(PHINode *Phi, Loop *TheLoop,
+                         DenseMap<Instruction *, Instruction *> &SinkAfter,
+                         DominatorTree *DT);
+
+  RecurrenceKind getRecurrenceKind() { return Kind; }
+
+  MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
+
+  TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
+
+  Instruction *getLoopExitInstr() { return LoopExitInstr; }
+
+  /// Returns true if the recurrence has unsafe algebra which requires a relaxed
+  /// floating-point model.
+  bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+  /// Returns first unsafe algebra instruction in the PHI node's use-chain.
+  Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+  /// Returns true if the recurrence kind is an integer kind.
+  static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns true if the recurrence kind is a floating point kind.
+  static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns true if the recurrence kind is an arithmetic kind.
+  static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
+
+  /// Returns the type of the recurrence. This type can be narrower than the
+  /// actual type of the Phi if the recurrence has been type-promoted.
+  Type *getRecurrenceType() { return RecurrenceType; }
+
+  /// Returns a reference to the instructions used for type-promoting the
+  /// recurrence.
+  SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
+
+  /// Returns true if all source operands of the recurrence are SExtInsts.
+  bool isSigned() { return IsSigned; }
+
+private:
+  // The starting value of the recurrence.
+  // It does not have to be zero!
+  TrackingVH<Value> StartValue;
+  // The instruction whose value is used outside the loop.
+  Instruction *LoopExitInstr = nullptr;
+  // The kind of the recurrence.
+  RecurrenceKind Kind = RK_NoRecurrence;
+  // If this is a min/max recurrence, the kind of min/max.
+  MinMaxRecurrenceKind MinMaxKind = MRK_Invalid;
+  // First occurrence of unsafe algebra in the PHI's use-chain.
+  Instruction *UnsafeAlgebraInst = nullptr;
+  // The type of the recurrence.
+  Type *RecurrenceType = nullptr;
+  // True if all source operands of the recurrence are SExtInsts.
+  bool IsSigned = false;
+  // Instructions used for type-promoting the recurrence.
+  SmallPtrSet<Instruction *, 8> CastInsts;
+};
+
+/// A struct for saving information about induction variables.
+class InductionDescriptor {
+public:
+  /// This enum represents the kinds of inductions that we support.
+  enum InductionKind {
+    IK_NoInduction,  ///< Not an induction variable.
+    IK_IntInduction, ///< Integer induction variable. Step = C.
+    IK_PtrInduction, ///< Pointer induction var. Step = C / sizeof(elem).
+    IK_FpInduction   ///< Floating point induction variable.
+  };
+
+public:
+  /// Default constructor - creates an invalid induction.
+  InductionDescriptor() = default;
+
+  /// Get the consecutive direction. Returns:
+  ///   0 - unknown or non-consecutive.
+  ///   1 - consecutive and increasing.
+  ///  -1 - consecutive and decreasing.
+  int getConsecutiveDirection() const;
+
+  /// Compute the transformed value of Index at offset StartValue using step
+  /// StepValue.
+  /// For integer induction, returns StartValue + Index * StepValue.
+  /// For pointer induction, returns StartValue[Index * StepValue].
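+  /// For example (a hedged illustration): an integer induction with
+  /// StartValue = 10 and step 3, transformed at Index = 4, yields
+  /// 10 + 4 * 3 = 22.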
+  /// FIXME: The newly created binary instructions should contain nsw/nuw
+  /// flags, which can be found from the original scalar operations.
+  Value *transform(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
+                   const DataLayout& DL) const;
+
+  Value *getStartValue() const { return StartValue; }
+  InductionKind getKind() const { return IK; }
+  const SCEV *getStep() const { return Step; }
+  ConstantInt *getConstIntStepValue() const;
+
+  /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
+  /// induction, the induction descriptor \p D will contain the data describing
+  /// this induction. If by some other means the caller has a better SCEV
+  /// expression for \p Phi than the one returned by the ScalarEvolution
+  /// analysis, it can be passed through \p Expr. If the def-use chain 
+  /// associated with the phi includes casts (that we know we can ignore
+  /// under proper runtime checks), they are passed through \p CastsToIgnore.
+  static bool 
+  isInductionPHI(PHINode *Phi, const Loop* L, ScalarEvolution *SE,
+                 InductionDescriptor &D, const SCEV *Expr = nullptr,
+                 SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);
+
+  /// Returns true if \p Phi is a floating point induction in the loop \p L.
+  /// If \p Phi is an induction, the induction descriptor \p D will contain 
+  /// the data describing this induction.
+  static bool isFPInductionPHI(PHINode *Phi, const Loop* L,
+                               ScalarEvolution *SE, InductionDescriptor &D);
+
+  /// Returns true if \p Phi is a loop \p L induction, in the context associated
+  /// with the run-time predicate of PSE. If \p Assume is true, this can add
+  /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
+  /// induction.
+  /// If \p Phi is an induction, \p D will contain the data describing this
+  /// induction.
+  static bool isInductionPHI(PHINode *Phi, const Loop* L,
+                             PredicatedScalarEvolution &PSE,
+                             InductionDescriptor &D, bool Assume = false);
+
+  /// Returns true if the induction type is FP and the binary operator does
+  /// not have the "fast-math" property. Such operation requires a relaxed FP
+  /// mode.
+  bool hasUnsafeAlgebra() {
+    return InductionBinOp && !cast<FPMathOperator>(InductionBinOp)->isFast();
+  }
+
+  /// Returns induction operator that does not have "fast-math" property
+  /// and requires FP unsafe mode.
+  Instruction *getUnsafeAlgebraInst() {
+    if (!InductionBinOp || cast<FPMathOperator>(InductionBinOp)->isFast())
+      return nullptr;
+    return InductionBinOp;
+  }
+
+  /// Returns binary opcode of the induction operator.
+  Instruction::BinaryOps getInductionOpcode() const {
+    return InductionBinOp ? InductionBinOp->getOpcode() :
+      Instruction::BinaryOpsEnd;
+  }
+
+  /// Returns a reference to the type cast instructions in the induction 
+  /// update chain, that are redundant when guarded with a runtime
+  /// SCEV overflow check.
+  const SmallVectorImpl<Instruction *> &getCastInsts() const { 
+    return RedundantCasts; 
+  }
+
+private:
+  /// Private constructor - used by \c isInductionPHI.
+  InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
+                      BinaryOperator *InductionBinOp = nullptr,
+                      SmallVectorImpl<Instruction *> *Casts = nullptr);
+
+  /// Start value.
+  TrackingVH<Value> StartValue;
+  /// Induction kind.
+  InductionKind IK = IK_NoInduction;
+  /// Step value.
+  const SCEV *Step = nullptr;
+  // Instruction that advances induction variable.
+  BinaryOperator *InductionBinOp = nullptr;
+  // Instructions used for type-casts of the induction variable,
+  // that are redundant when guarded with a runtime SCEV overflow check.
+  SmallVector<Instruction *, 2> RedundantCasts;
+};
+
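+/// Insert a preheader for the loop \p L if it is missing one (a hedged
+/// summary: the header's non-backedge predecessors are redirected through a
+/// new block). Updates dominator and loop info; returns the new preheader, or
+/// null if one could not be inserted (e.g. entry via indirectbr).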
+BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
+                                   bool PreserveLCSSA);
+
+/// Ensure that all exit blocks of the loop are dedicated exits.
+///
+/// For any loop exit block with non-loop predecessors, we split the loop
+/// predecessors to use a dedicated loop exit block. We update the dominator
+/// tree and loop info if provided, and will preserve LCSSA if requested.
+bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
+                             bool PreserveLCSSA);
+
+/// Ensures LCSSA form for every instruction from the Worklist in the scope of
+/// its innermost containing loop.
+///
+/// For each instruction in the worklist that has uses outside of the loop, an
+/// LCSSA PHI node is inserted and the uses outside the loop are rewritten to
+/// use this node.
+///
+/// LoopInfo and DominatorTree are required and, since the routine makes no
+/// changes to CFG, preserved.
+///
+/// Returns true if any modifications are made.
+bool formLCSSAForInstructions(SmallVectorImpl<Instruction *> &Worklist,
+                              DominatorTree &DT, LoopInfo &LI);
+
+/// \brief Put loop into LCSSA form.
+///
+/// Looks at all instructions in the loop which have uses outside of the
+/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
+/// the loop are rewritten to use this node.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution *SE);
+
+/// \brief Put a loop nest into LCSSA form.
+///
+/// This recursively forms LCSSA for a loop nest.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
+                          ScalarEvolution *SE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as
+/// arguments. Diagnostics are emitted via \p ORE. Returns the changed status.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+                TargetLibraryInfo *, TargetTransformInfo *, Loop *,
+                AliasSetTracker *, LoopSafetyInfo *,
+                OptimizationRemarkEmitter *ORE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree.  This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
+/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
+/// loop and loop safety information as arguments. Diagnostics are emitted via
+/// \p ORE. Returns the changed status.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+                 TargetLibraryInfo *, Loop *, AliasSetTracker *,
+                 LoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
+
+/// This function deletes dead loops. The caller of this function needs to
+/// guarantee that the loop is in fact dead.
+/// The function requires a number of prerequisites to be present:
+///   - The loop needs to be in LCSSA form
+///   - The loop needs to have a Preheader
+///   - A unique dedicated exit block must exist
+///
+/// This also updates the relevant analysis information in \p DT, \p SE, and \p
+/// LI if pointers to those are provided.
+/// It also updates the loop PM if an updater struct is provided.
+void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
+                    LoopInfo *LI);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop.  We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes a set of must-alias values, Loop exit blocks
+/// vector, loop exit blocks insertion point vector, PredIteratorCache,
+/// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
+/// of the loop and loop safety information as arguments.
+/// Diagnostics are emitted via \p ORE. Returns the changed status.
+bool promoteLoopAccessesToScalars(const SmallSetVector<Value *, 8> &,
+                                  SmallVectorImpl<BasicBlock *> &,
+                                  SmallVectorImpl<Instruction *> &,
+                                  PredIteratorCache &, LoopInfo *,
+                                  DominatorTree *, const TargetLibraryInfo *,
+                                  Loop *, AliasSetTracker *, LoopSafetyInfo *,
+                                  OptimizationRemarkEmitter *);
+
+/// Does a BFS from a given node to all of its children inside a given loop.
+/// The returned vector of nodes includes the starting point.
+SmallVector<DomTreeNode *, 16> collectChildrenInLoop(DomTreeNode *N,
+                                                     const Loop *CurLoop);
+
+/// \brief Returns the instructions that use values defined in the loop.
+SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
+
+/// \brief Find string metadata for loop
+///
+/// If it has a value (e.g. {"llvm.distribute", 1}), return the value as an
+/// operand or null otherwise.  If the string metadata is not found return
+/// Optional's not-a-value.
+Optional<const MDOperand *> findStringMetadataForLoop(Loop *TheLoop,
+                                                      StringRef Name);
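+
+// A hedged usage sketch (the metadata name is illustrative):
+//
+//   if (Optional<const MDOperand *> Op =
+//           findStringMetadataForLoop(TheLoop, "llvm.loop.unroll.count"))
+//     ... // *Op is the value operand, or nullptr if the entry has no value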
+
+/// \brief Set the input string into loop metadata, keeping other values intact.
+void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
+                             unsigned V = 0);
+
+/// \brief Get a loop's estimated trip count based on branch weight metadata.
+/// Returns 0 when the count is estimated to be 0, or None when a meaningful
+/// estimate cannot be made.
+Optional<unsigned> getLoopEstimatedTripCount(Loop *L);
+
+/// Helper to consistently add the set of standard passes to a loop pass's \c
+/// AnalysisUsage.
+///
+/// All loop passes should call this as part of implementing their \c
+/// getAnalysisUsage.
+void getLoopAnalysisUsage(AnalysisUsage &AU);
+
+/// Returns true if the hoister and sinker can handle this instruction.
+/// If SafetyInfo is null, we are checking for sinking instructions from
+/// preheader to loop body (no speculation).
+/// If SafetyInfo is not null, we are checking for hoisting/sinking
+/// instructions from loop body to preheader/exit. Check if the instruction
+/// can execute speculatively.
+/// If \p ORE is set use it to emit optimization remarks.
+bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+                        Loop *CurLoop, AliasSetTracker *CurAST,
+                        LoopSafetyInfo *SafetyInfo,
+                        OptimizationRemarkEmitter *ORE = nullptr);
+
+/// Generates a vector reduction using shufflevectors to reduce the value.
+Value *getShuffleReduction(IRBuilder<> &Builder, Value *Src, unsigned Op,
+                           RecurrenceDescriptor::MinMaxRecurrenceKind
+                               MinMaxKind = RecurrenceDescriptor::MRK_Invalid,
+                           ArrayRef<Value *> RedOps = ArrayRef<Value *>());
+
+/// Create a target reduction of the given vector. The reduction operation
+/// is described by the \p Opcode parameter. min/max reductions require
+/// additional information supplied in \p Flags.
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+Value *
+createSimpleTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
+                            unsigned Opcode, Value *Src,
+                            TargetTransformInfo::ReductionFlags Flags =
+                                TargetTransformInfo::ReductionFlags(),
+                            ArrayRef<Value *> RedOps = ArrayRef<Value *>());
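+
+// A hedged usage sketch (illustrative only): emit an integer add reduction of
+// a vector \c VecSrc, letting the target pick intrinsics or shuffles.
+//
+//   Value *Sum =
+//       createSimpleTargetReduction(Builder, TTI, Instruction::Add, VecSrc);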
+
+/// Create a generic target reduction using a recurrence descriptor \p Desc
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+Value *createTargetReduction(IRBuilder<> &B, const TargetTransformInfo *TTI,
+                             RecurrenceDescriptor &Desc, Value *Src,
+                             bool NoNaN = false);
+
+/// Get the intersection (logical and) of all of the potential IR flags
+/// of each scalar operation (VL) that will be converted into a vector (I).
+/// If OpValue is non-null, we only consider operations similar to OpValue
+/// when intersecting.
+/// Flag set: NSW, NUW, exact, and all of fast-math.
+void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
new file mode 100644
index 0000000..fa5d784
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -0,0 +1,152 @@
+//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a utility class to perform loop versioning.  The versioned
+// loop speculates that otherwise may-aliasing memory accesses don't overlap and
+// emits checks to prove this.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+namespace llvm {
+
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class ScalarEvolution;
+
+/// \brief This class emits a version of the loop where run-time checks ensure
+/// that may-alias pointers can't overlap.
+///
+/// It currently only supports single-exit loops and assumes that the loop
+/// already has a preheader.
+class LoopVersioning {
+public:
+  /// \brief Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
+  /// It uses runtime checks provided by the user. If \p UseLAIChecks is true,
+  /// we will retain the default checks made by LAI. Otherwise, construct an
+  /// object having no checks and we expect the user to add them.
+  LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+                 DominatorTree *DT, ScalarEvolution *SE,
+                 bool UseLAIChecks = true);
+
+  /// \brief Performs the CFG manipulation part of versioning the loop including
+  /// the DominatorTree and LoopInfo updates.
+  ///
+  /// The loop that was used to construct the class will be the "versioned" loop
+  /// i.e. the loop that will receive control if all the memchecks pass.
+  ///
+  /// This allows the loop transform pass to operate on the same loop regardless
+  /// of whether versioning was necessary or not:
+  ///
+  ///    for each loop L:
+  ///        analyze L
+  ///        if versioning is necessary version L
+  ///        transform L
+  void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }
+
+  /// \brief Same as above, but if the client has already precomputed the set
+  /// of values used outside the loop, this API allows passing that set in.
+  void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+  /// \brief Returns the versioned loop.  Control flows here if pointers in the
+  /// loop don't alias (i.e. all memchecks passed).  (This loop is actually the
+  /// same as the original loop that we got constructed with.)
+  Loop *getVersionedLoop() { return VersionedLoop; }
+
+  /// \brief Returns the fall-back loop.  Control flows here if pointers in the
+  /// loop may alias (i.e. one of the memchecks failed).
+  Loop *getNonVersionedLoop() { return NonVersionedLoop; }
+
+  /// \brief Sets the runtime alias checks for versioning the loop.
+  void setAliasChecks(
+      SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks);
+
+  /// \brief Sets the runtime SCEV checks for versioning the loop.
+  void setSCEVChecks(SCEVUnionPredicate Check);
+
+  /// \brief Annotate memory instructions in the versioned loop with no-alias
+  /// metadata based on the memchecks issued.
+  ///
+  /// This is just a wrapper that calls prepareNoAliasMetadata and
+  /// annotateInstWithNoAlias on the instructions of the versioned loop.
+  void annotateLoopWithNoAlias();
+
+  /// \brief Set up the aliasing scopes based on the memchecks.  This needs to
+  /// be called before the first call to annotateInstWithNoAlias.
+  void prepareNoAliasMetadata();
+
+  /// \brief Add the noalias annotations to \p VersionedInst.
+  ///
+  /// \p OrigInst is the instruction corresponding to \p VersionedInst in the
+  /// original loop.  Initialize the aliasing scopes with
+  /// prepareNoAliasMetadata once before this can be called.
+  void annotateInstWithNoAlias(Instruction *VersionedInst,
+                               const Instruction *OrigInst);
+
+private:
+  /// \brief Adds the necessary PHI nodes for the versioned loops based on the
+  /// loop-defined values used outside of the loop.
+  ///
+  /// This needs to be called after versionLoop if there are defs in the loop
+  /// that are used outside the loop.
+  void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+  /// \brief Add the noalias annotations to \p I.  Initialize the aliasing
+  /// scopes with prepareNoAliasMetadata once before this can be called.
+  void annotateInstWithNoAlias(Instruction *I) {
+    annotateInstWithNoAlias(I, I);
+  }
+
+  /// \brief The original loop.  This becomes the "versioned" one.  I.e.,
+  /// control flows here if pointers in the loop don't alias.
+  Loop *VersionedLoop;
+  /// \brief The fall-back loop.  I.e. control flows here if pointers in the
+  /// loop may alias (memchecks failed).
+  Loop *NonVersionedLoop;
+
+  /// \brief This maps the instructions from VersionedLoop to their counterpart
+  /// in NonVersionedLoop.
+  ValueToValueMapTy VMap;
+
+  /// \brief The set of alias checks that we are versioning for.
+  SmallVector<RuntimePointerChecking::PointerCheck, 4> AliasChecks;
+
+  /// \brief The set of SCEV checks that we are versioning for.
+  SCEVUnionPredicate Preds;
+
+  /// \brief Maps a pointer to the pointer checking group that the pointer
+  /// belongs to.
+  DenseMap<const Value *, const RuntimePointerChecking::CheckingPtrGroup *>
+      PtrToGroup;
+
+  /// \brief The alias scope corresponding to a pointer checking group.
+  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
+      GroupToScope;
+
+  /// \brief The list of alias scopes that a pointer checking group can't alias.
+  DenseMap<const RuntimePointerChecking::CheckingPtrGroup *, MDNode *>
+      GroupToNonAliasingScopeList;
+
+  /// \brief Analyses used.
+  const LoopAccessInfo &LAI;
+  LoopInfo *LI;
+  DominatorTree *DT;
+  ScalarEvolution *SE;
+};
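+
+// A hedged usage sketch (illustrative only): version a loop using the checks
+// computed by LoopAccessInfo, then annotate the versioned copy.
+//
+//   LoopVersioning LVer(LAI, L, &LI, &DT, &SE);
+//   LVer.versionLoop();              // emits memchecks and the fall-back loop
+//   LVer.annotateLoopWithNoAlias();  // no-alias metadata from the memchecks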
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h
new file mode 100644
index 0000000..12774c7
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerInvoke.h
@@ -0,0 +1,30 @@
+//===- LowerInvoke.h - Eliminate Invoke instructions ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation is designed for use by code generators which do not yet
+// support stack unwinding.  This pass converts 'invoke' instructions to 'call'
+// instructions, so that any exception-handling 'landingpad' blocks become dead
+// code (which can be removed by running the '-simplifycfg' pass afterwards).
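+//
+// A hedged IR sketch of the rewrite:
+//
+//   invoke void @f() to label %cont unwind label %lpad
+//
+// becomes
+//
+//   call void @f()
+//   br label %cont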
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+#define LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerInvokePass : public PassInfoMixin<LowerInvokePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
new file mode 100644
index 0000000..2b7d0f6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -0,0 +1,56 @@
+//===- llvm/Transforms/Utils/LowerMemIntrinsics.h ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lower memset, memcpy, and memmove intrinsics to loops (e.g. for targets
+// library support).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+#define LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+
+namespace llvm {
+
+class ConstantInt;
+class Instruction;
+class MemCpyInst;
+class MemMoveInst;
+class MemSetInst;
+class TargetTransformInfo;
+class Value;
+
+/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
+/// a compile-time constant. The loop will be inserted at \p InsertBefore.
+void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
+                                 Value *DstAddr, Value *CopyLen,
+                                 unsigned SrcAlign, unsigned DestAlign,
+                                 bool SrcIsVolatile, bool DstIsVolatile,
+                                 const TargetTransformInfo &TTI);
+
+/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
+/// compile time constant. Loop is inserted at \p InsertBefore.
+void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+                               Value *DstAddr, ConstantInt *CopyLen,
+                               unsigned SrcAlign, unsigned DestAlign,
+                               bool SrcIsVolatile, bool DstIsVolatile,
+                               const TargetTransformInfo &TTI);
+
+/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
+void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
+
+/// Expand \p MemMove as a loop. \p MemMove is not deleted.
+void expandMemMoveAsLoop(MemMoveInst *MemMove);
+
+/// Expand \p MemSet as a loop. \p MemSet is not deleted.
+void expandMemSetAsLoop(MemSetInst *MemSet);
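+
+// A hedged usage sketch (illustrative only): lower a memcpy the target cannot
+// handle natively, then drop the original intrinsic ourselves.
+//
+//   if (auto *MC = dyn_cast<MemCpyInst>(&I)) {
+//     expandMemCpyAsLoop(MC, TTI); // the intrinsic is not deleted for us
+//     MC->eraseFromParent();
+//   }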
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h b/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h
new file mode 100644
index 0000000..4076843
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -0,0 +1,31 @@
+//===- Mem2Reg.h - The -mem2reg pass, a wrapper around the Utils lib ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is a simple pass wrapper around the PromoteMemToReg function call
+// exposed by the Utils library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MEM2REG_H
+#define LLVM_TRANSFORMS_UTILS_MEM2REG_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class PromotePass : public PassInfoMixin<PromotePass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 0000000..4b9bc82
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,101 @@
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class Module;
+class Function;
+class GlobalValue;
+class GlobalVariable;
+class Constant;
+class StringRef;
+class Value;
+class Type;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it along
+/// side other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
+void appendToGlobalCtors(Module &M, Function *F, int Priority,
+                         Constant *Data = nullptr);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority,
+                         Constant *Data = nullptr);
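+
+// A hedged usage sketch (the function name is illustrative): register a module
+// constructor; lower priorities run earlier.
+//
+//   if (Function *Init = M.getFunction("__my_module_init"))
+//     appendToGlobalCtors(M, Init, /*Priority=*/65535);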
+
+// Validate the result of Module::getOrInsertFunction called for an interface
+// function of a given sanitizer. If the instrumented module defines a function
+// with the same name, their prototypes must match, otherwise
+// getOrInsertFunction returns a bitcast.
+Function *checkSanitizerInterfaceFunction(Constant *FuncOrBitcast);
+
+Function *declareSanitizerInitFunction(Module &M, StringRef InitName,
+                                       ArrayRef<Type *> InitArgTypes);
+
+/// \brief Creates a sanitizer constructor function, and calls the sanitizer's
+/// init function from it.
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
+std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
+    Module &M, StringRef CtorName, StringRef InitName,
+    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+    StringRef VersionCheckName = StringRef());
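+
+// A hedged usage sketch (the "mysan" names are illustrative): create a ctor
+// that calls "__mysan_init()" with no arguments, then register it.
+//
+//   Function *Ctor, *InitFn;
+//   std::tie(Ctor, InitFn) = createSanitizerCtorAndInitFunctions(
+//       M, "mysan.module_ctor", "__mysan_init",
+//       /*InitArgTypes=*/{}, /*InitArgs=*/{});
+//   appendToGlobalCtors(M, Ctor, /*Priority=*/0);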
+
+/// Rename all the anon globals in the module using a hash computed from
+/// the list of public globals in the module.
+bool nameUnamedGlobals(Module &M);
+
+/// \brief Adds global values to the llvm.used list.
+void appendToUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// \brief Adds global values to the llvm.compiler.used list.
+void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// Filter out potentially dead comdat functions where other entries keep the
+/// entire comdat group alive.
+///
+/// This is designed for cases where functions appear to become dead but remain
+/// alive due to other live entries in their comdat group.
+///
+/// The \p DeadComdatFunctions container should only have pointers to
+/// `Function`s which are members of a comdat group and are believed to be
+/// dead.
+///
+/// After this routine finishes, the only remaining `Function`s in \p
+/// DeadComdatFunctions are those where every member of the comdat is listed
+/// and thus removing them is safe (provided *all* are removed).
+void filterDeadComdatFunctions(
+    Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
+
+/// \brief Produce a unique identifier for this module by taking the MD5 sum of
+/// the names of the module's strong external symbols that are not comdat
+/// members.
+///
+/// This identifier is normally guaranteed to be unique, or the program would
+/// fail to link due to multiply defined symbols.
+///
+/// If the module has no strong external symbols (such a module may still have a
+/// semantic effect if it performs global initialization), we cannot produce a
+/// unique identifier for this module, so we return the empty string.
+std::string getUniqueModuleId(Module *M);
+
+} // End llvm namespace
+
+#endif //  LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h b/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h
new file mode 100644
index 0000000..17fc902
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/NameAnonGlobals.h
@@ -0,0 +1,33 @@
+//===-- NameAnonGlobals.h - Anonymous Global Naming Pass --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements naming anonymous globals to make sure they can be
+// referred to by ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+#define LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that provides a name to every anonymous global.
+class NameAnonGlobalPass : public PassInfoMixin<NameAnonGlobalPass> {
+public:
+  NameAnonGlobalPass() = default;
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h b/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h
new file mode 100644
index 0000000..165d4bd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/OrderedInstructions.h
@@ -0,0 +1,54 @@
+//===- llvm/Transforms/Utils/OrderedInstructions.h -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an efficient way to check for dominance relation between 2
+// instructions.
+//
+// This interface dispatches to appropriate dominance check given 2
+// instructions, i.e. in case the instructions are in the same basic block,
+// OrderedBasicBlock (with instruction numbering and caching) is used.
+// Otherwise, dominator tree is used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+#define LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/OrderedBasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+
+class OrderedInstructions {
+  /// Used to check dominance for instructions in same basic block.
+  mutable DenseMap<const BasicBlock *, std::unique_ptr<OrderedBasicBlock>>
+      OBBMap;
+
+  /// The dominator tree of the parent function.
+  DominatorTree *DT;
+
+public:
+  /// Constructor.
+  OrderedInstructions(DominatorTree *DT) : DT(DT) {}
+
+  /// Return true if first instruction dominates the second.
+  bool dominates(const Instruction *, const Instruction *) const;
+
+  /// Invalidate the OrderedBasicBlock cache when its basic block changes.
+  /// i.e. if an instruction is deleted from or added to the basic block, the user
+  /// should call this function to invalidate the OrderedBasicBlock cache for
+  /// this basic block.
+  void invalidateBlock(const BasicBlock *BB) { OBBMap.erase(BB); }
+};
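+
+// A hedged usage sketch (illustrative only): same-block queries are answered
+// from the cached OrderedBasicBlock instead of rescanning the block.
+//
+//   OrderedInstructions OI(&DT);
+//   if (OI.dominates(Def, Use))
+//     ... // Def executes before Use
+//   OI.invalidateBlock(BB); // after inserting/removing instructions in BB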
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ORDEREDINSTRUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
new file mode 100644
index 0000000..8150f15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -0,0 +1,295 @@
+//===- PredicateInfo.h - Build PredicateInfo ----------------------*-C++-*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the PredicateInfo analysis, which creates an Extended
+/// SSA form for operations used in branch comparisons and llvm.assume
+/// comparisons.
+///
+/// Copies of these operations are inserted into the true/false edge (and after
+/// assumes), and information attached to the copies.  All uses of the original
+/// operation in blocks dominated by the true/false edge (and assume), are
+/// replaced with uses of the copies.  This enables passes to easily and sparsely
+/// propagate condition based info into the operations that may be affected.
+///
+/// Example:
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// ret i32 %x
+/// false:
+/// ret i32 1
+///
+/// will become
+///
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// %x.0 = call @llvm.ssa_copy.i32(i32 %x)
+/// ret i32 %x.0
+/// false:
+/// ret i32 1
+///
+/// Using getPredicateInfoFor on x.0 will give you the comparison it is
+/// dominated by (the icmp), and that you are located in the true edge of that
+/// comparison, which tells you x.0 is 50.
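+///
+/// A hedged usage sketch (illustrative only), where \c XCopy is %x.0 above:
+///
+///   PredicateInfo PI(F, DT, AC);
+///   if (const auto *PB = PI.getPredicateInfoFor(XCopy))
+///     if (const auto *PBr = dyn_cast<PredicateBranch>(PB))
+///       ... // PBr->Condition is the icmp; PBr->TrueEdge tells you the edge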
+///
+/// In order to reduce the number of copies inserted, predicateinfo is only
+/// inserted where it would actually be live.  This means if there are no uses of
+/// an operation dominated by the branch edges, or by an assume, the associated
+/// predicate info is never inserted.
+///
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+#define LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/Utils/OrderedInstructions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+namespace llvm {
+
+class DominatorTree;
+class Function;
+class Instruction;
+class MemoryAccess;
+class LLVMContext;
+class raw_ostream;
+
+enum PredicateType { PT_Branch, PT_Assume, PT_Switch };
+
+// Base class for all predicate information we provide.
+// All of our predicate information has at least a comparison.
+class PredicateBase : public ilist_node<PredicateBase> {
+public:
+  PredicateType Type;
+  // The original operand before we renamed it.
+  // This can be used by passes, when destroying predicateinfo, to know
+  // whether they can just drop the intrinsic, or have to merge metadata.
+  Value *OriginalOp;
+  PredicateBase(const PredicateBase &) = delete;
+  PredicateBase &operator=(const PredicateBase &) = delete;
+  PredicateBase() = delete;
+  virtual ~PredicateBase() = default;
+
+protected:
+  PredicateBase(PredicateType PT, Value *Op) : Type(PT), OriginalOp(Op) {}
+};
+
+class PredicateWithCondition : public PredicateBase {
+public:
+  Value *Condition;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Assume || PB->Type == PT_Branch ||
+           PB->Type == PT_Switch;
+  }
+
+protected:
+  PredicateWithCondition(PredicateType PT, Value *Op, Value *Condition)
+      : PredicateBase(PT, Op), Condition(Condition) {}
+};
+
+// Provides predicate information for assumes.  Since assumes are always true,
+// we simply provide the assume instruction, so you can tell your relative
+// position to it.
+class PredicateAssume : public PredicateWithCondition {
+public:
+  IntrinsicInst *AssumeInst;
+  PredicateAssume(Value *Op, IntrinsicInst *AssumeInst, Value *Condition)
+      : PredicateWithCondition(PT_Assume, Op, Condition),
+        AssumeInst(AssumeInst) {}
+  PredicateAssume() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Assume;
+  }
+};
+
+// Mixin class for edge predicates.  The FROM block is the block where the
+// predicate originates, and the TO block is the block where the predicate is
+// valid.
+class PredicateWithEdge : public PredicateWithCondition {
+public:
+  BasicBlock *From;
+  BasicBlock *To;
+  PredicateWithEdge() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Branch || PB->Type == PT_Switch;
+  }
+
+protected:
+  PredicateWithEdge(PredicateType PType, Value *Op, BasicBlock *From,
+                    BasicBlock *To, Value *Cond)
+      : PredicateWithCondition(PType, Op, Cond), From(From), To(To) {}
+};
+
+// Provides predicate information for branches.
+class PredicateBranch : public PredicateWithEdge {
+public:
+  // If true, SplitBB is the true successor, otherwise it's the false successor.
+  bool TrueEdge;
+  PredicateBranch(Value *Op, BasicBlock *BranchBB, BasicBlock *SplitBB,
+                  Value *Condition, bool TakenEdge)
+      : PredicateWithEdge(PT_Branch, Op, BranchBB, SplitBB, Condition),
+        TrueEdge(TakenEdge) {}
+  PredicateBranch() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Branch;
+  }
+};
+
+class PredicateSwitch : public PredicateWithEdge {
+public:
+  Value *CaseValue;
+  // This is the switch instruction.
+  SwitchInst *Switch;
+  PredicateSwitch(Value *Op, BasicBlock *SwitchBB, BasicBlock *TargetBB,
+                  Value *CaseValue, SwitchInst *SI)
+      : PredicateWithEdge(PT_Switch, Op, SwitchBB, TargetBB,
+                          SI->getCondition()),
+        CaseValue(CaseValue), Switch(SI) {}
+  PredicateSwitch() = delete;
+  static bool classof(const PredicateBase *PB) {
+    return PB->Type == PT_Switch;
+  }
+};
+
+// This name is used in a few places, so kick it into its own namespace
+namespace PredicateInfoClasses {
+struct ValueDFS;
+}
+
+/// \brief Encapsulates PredicateInfo, including all data associated with memory
+/// accesses.
+class PredicateInfo {
+private:
+  // Used to store information about each value we might rename.
+  struct ValueInfo {
+    // Information about each possible copy. During processing, this is each
+    // inserted info. After processing, we move the uninserted ones to the
+    // uninserted vector.
+    SmallVector<PredicateBase *, 4> Infos;
+    SmallVector<PredicateBase *, 4> UninsertedInfos;
+  };
+  // This owns all the predicate infos in the function, placed or not.
+  iplist<PredicateBase> AllInfos;
+
+public:
+  PredicateInfo(Function &, DominatorTree &, AssumptionCache &);
+  ~PredicateInfo();
+
+  void verifyPredicateInfo() const;
+
+  void dump() const;
+  void print(raw_ostream &) const;
+
+  const PredicateBase *getPredicateInfoFor(const Value *V) const {
+    return PredicateMap.lookup(V);
+  }
+
+protected:
+  // Used by PredicateInfo annotator, dumpers, and wrapper pass.
+  friend class PredicateInfoAnnotatedWriter;
+  friend class PredicateInfoPrinterLegacyPass;
+
+private:
+  void buildPredicateInfo();
+  void processAssume(IntrinsicInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void processBranch(BranchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void processSwitch(SwitchInst *, BasicBlock *, SmallPtrSetImpl<Value *> &);
+  void renameUses(SmallPtrSetImpl<Value *> &);
+  using ValueDFS = PredicateInfoClasses::ValueDFS;
+  typedef SmallVectorImpl<ValueDFS> ValueDFSStack;
+  void convertUsesToDFSOrdered(Value *, SmallVectorImpl<ValueDFS> &);
+  Value *materializeStack(unsigned int &, ValueDFSStack &, Value *);
+  bool stackIsInScope(const ValueDFSStack &, const ValueDFS &) const;
+  void popStackUntilDFSScope(ValueDFSStack &, const ValueDFS &);
+  ValueInfo &getOrCreateValueInfo(Value *);
+  void addInfoFor(SmallPtrSetImpl<Value *> &OpsToRename, Value *Op,
+                  PredicateBase *PB);
+  const ValueInfo &getValueInfo(Value *) const;
+  Function &F;
+  DominatorTree &DT;
+  AssumptionCache &AC;
+  OrderedInstructions OI;
+  // This maps from copy operands to Predicate Info. Note that it does not own
+  // the Predicate Info, they belong to the ValueInfo structs in the ValueInfos
+  // vector.
+  DenseMap<const Value *, const PredicateBase *> PredicateMap;
+  // This stores info about each operand or comparison result we make copies
+  // of.  The real ValueInfos start at index 1; index 0 is unused so that we
+  // can more easily detect invalid indexing.
+  SmallVector<ValueInfo, 32> ValueInfos;
+  // This gives the index into the ValueInfos array for a given Value.  Because
+  // 0 is not a valid Value Info index, you can use DenseMap::lookup and tell
+  // whether it returned a valid result.
+  DenseMap<Value *, unsigned int> ValueInfoNums;
+  // The set of edges along which we can only handle phi uses, due to critical
+  // edges.
+  DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeUsesOnly;
+};
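+
+// Example (illustrative sketch, not part of this header; F, DT, AC, and V are
+// assumed to be in scope): build PredicateInfo for a function and query the
+// predicate attached to a renamed operand.
+//
+//   PredicateInfo PI(F, DT, AC);
+//   if (const PredicateBase *PB = PI.getPredicateInfoFor(V))
+//     if (const auto *PBr = dyn_cast<PredicateBranch>(PB))
+//       ; // V is constrained by the branch condition along PBr's edge.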
+
+// This pass does eager building and then printing of PredicateInfo. It is
+// used by the tests to be able to build, dump, and verify PredicateInfo.
+class PredicateInfoPrinterLegacyPass : public FunctionPass {
+public:
+  PredicateInfoPrinterLegacyPass();
+
+  static char ID;
+  bool runOnFunction(Function &) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+/// \brief Printer pass for \c PredicateInfo.
+class PredicateInfoPrinterPass
+    : public PassInfoMixin<PredicateInfoPrinterPass> {
+  raw_ostream &OS;
+
+public:
+  explicit PredicateInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// \brief Verifier pass for \c PredicateInfo.
+struct PredicateInfoVerifierPass : PassInfoMixin<PredicateInfoVerifierPass> {
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
new file mode 100644
index 0000000..bb8a61a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -0,0 +1,46 @@
+//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to promote alloca instructions to SSA
+// registers, by using the SSA construction algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class AllocaInst;
+class DominatorTree;
+class AliasSetTracker;
+class AssumptionCache;
+
+/// \brief Return true if this alloca is legal for promotion.
+///
+/// This is true if there are only loads, stores, and lifetime markers
+/// (transitively) using this alloca. This also enforces that there is only
+/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
+/// markers.
+bool isAllocaPromotable(const AllocaInst *AI);
+
+/// \brief Promote the specified list of alloca instructions into scalar
+/// registers, inserting PHI nodes as appropriate.
+///
+/// This function makes use of DominanceFrontier information.  This function
+/// does not modify the CFG of the function at all.  All allocas must be from
+/// the same function.
+///
+void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
+                     AssumptionCache *AC = nullptr);
+
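+// Example (illustrative sketch, not part of this header; F, DT, and AC are
+// assumed to be in scope): collect the promotable allocas from a function's
+// entry block and promote them.
+//
+//   SmallVector<AllocaInst *, 8> Allocas;
+//   for (Instruction &I : F.getEntryBlock())
+//     if (auto *AI = dyn_cast<AllocaInst>(&I))
+//       if (isAllocaPromotable(AI))
+//         Allocas.push_back(AI);
+//   if (!Allocas.empty())
+//     PromoteMemToReg(Allocas, DT, &AC);
+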
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h
new file mode 100644
index 0000000..6cd9f15
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -0,0 +1,173 @@
+//===- SSAUpdater.h - Unstructured SSA Update Tool --------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class BasicBlock;
+class Instruction;
+class LoadInst;
+class PHINode;
+template <typename T> class SmallVectorImpl;
+template <typename T> class SSAUpdaterTraits;
+class Type;
+class Use;
+class Value;
+
+/// \brief Helper class for SSA formation on a set of values defined in
+/// multiple blocks.
+///
+/// This is used when code duplication or another unstructured
+/// transformation wants to rewrite a set of uses of one value with uses of a
+/// set of values.
+class SSAUpdater {
+  friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
+  /// This keeps track of which value to use on a per-block basis. When we
+  /// insert PHI nodes, we keep track of them here.
+  void *AV = nullptr;
+
+  /// ProtoType holds the type of the values being rewritten.
+  Type *ProtoType = nullptr;
+
+  /// PHI nodes are given a name based on ProtoName.
+  std::string ProtoName;
+
+  /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
+  /// the vector.
+  SmallVectorImpl<PHINode *> *InsertedPHIs;
+
+public:
+  /// If InsertedPHIs is specified, it will be filled
+  /// in with all PHI Nodes created by rewriting.
+  explicit SSAUpdater(SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
+  SSAUpdater(const SSAUpdater &) = delete;
+  SSAUpdater &operator=(const SSAUpdater &) = delete;
+  ~SSAUpdater();
+
+  /// \brief Reset this object to get ready for a new set of SSA updates with
+  /// type 'Ty'.
+  ///
+  /// PHI nodes get a name based on 'Name'.
+  void Initialize(Type *Ty, StringRef Name);
+
+  /// \brief Indicate that a rewritten value is available in the specified block
+  /// with the specified value.
+  void AddAvailableValue(BasicBlock *BB, Value *V);
+
+  /// \brief Return true if the SSAUpdater already has a value for the specified
+  /// block.
+  bool HasValueForBlock(BasicBlock *BB) const;
+
+  /// \brief Construct SSA form, materializing a value that is live at the end
+  /// of the specified block.
+  Value *GetValueAtEndOfBlock(BasicBlock *BB);
+
+  /// \brief Construct SSA form, materializing a value that is live in the
+  /// middle of the specified block.
+  ///
+  /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
+  /// in one important case: if there is a definition of the rewritten value
+  /// after the 'use' in BB.  Consider code like this:
+  ///
+  /// \code
+  ///      X1 = ...
+  ///   SomeBB:
+  ///      use(X)
+  ///      X2 = ...
+  ///      br Cond, SomeBB, OutBB
+  /// \endcode
+  ///
+  /// In this case, there are two values (X1 and X2) added to the AvailableVals
+  /// set by the client of the rewriter, and those values are both live out of
+  /// their respective blocks.  However, the use of X happens in the *middle* of
+  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
+  /// merge the appropriate values, and this value isn't live out of the block.
+  Value *GetValueInMiddleOfBlock(BasicBlock *BB);
+
+  /// \brief Rewrite a use of the symbolic value.
+  ///
+  /// This handles PHI nodes, which use their value in the corresponding
+  /// predecessor. Note that this will not work if the use is supposed to be
+  /// rewritten to a value defined in the same block as the use, but above it.
+  /// Any 'AddAvailableValue's added for the use's block will be considered to
+  /// be below it.
+  void RewriteUse(Use &U);
+
+  /// \brief Rewrite a use like \c RewriteUse but handling in-block definitions.
+  ///
+  /// This version of the method can rewrite uses in the same block as
+  /// a definition, because it assumes that all uses of a value are below any
+  /// inserted values.
+  void RewriteUseAfterInsertions(Use &U);
+
+private:
+  Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+};
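+
+/// Example usage (illustrative sketch; BB1, BB2, V1, V2, and the instruction
+/// I whose uses are being rewritten are assumed to be in scope):
+/// \code
+///   SSAUpdater Updater;
+///   Updater.Initialize(I->getType(), I->getName());
+///   Updater.AddAvailableValue(BB1, V1);
+///   Updater.AddAvailableValue(BB2, V2);
+///   // Collect the uses first; RewriteUse modifies the use list.
+///   SmallVector<Use *, 8> Uses;
+///   for (Use &U : I->uses())
+///     Uses.push_back(&U);
+///   for (Use *U : Uses)
+///     Updater.RewriteUse(*U);
+/// \endcode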
+
+/// \brief Helper class for promoting a collection of loads and stores into SSA
+/// Form using the SSAUpdater.
+///
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
+class LoadAndStorePromoter {
+protected:
+  SSAUpdater &SSA;
+
+public:
+  LoadAndStorePromoter(ArrayRef<const Instruction *> Insts,
+                       SSAUpdater &S, StringRef Name = StringRef());
+  virtual ~LoadAndStorePromoter() = default;
+
+  /// \brief This does the promotion.
+  ///
+  /// Insts is a list of loads and stores to promote, and Name is the basename
+  /// for the PHIs to insert. After this is complete, the loads and stores are
+  /// removed from the code.
+  void run(const SmallVectorImpl<Instruction *> &Insts) const;
+
+  /// \brief Return true if the specified instruction is in the Inst list.
+  ///
+  /// The Insts list is the one passed into the constructor. Clients should
+  /// implement this with a more efficient version if possible.
+  virtual bool isInstInList(Instruction *I,
+                            const SmallVectorImpl<Instruction *> &Insts) const;
+
+  /// \brief This hook is invoked after all the stores are found and inserted as
+  /// available values.
+  virtual void doExtraRewritesBeforeFinalDeletion() const {}
+
+  /// \brief Clients can choose to implement this to get notified right before
+  /// a load is RAUW'd with another value.
+  virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {}
+
+  /// \brief Called before each instruction is deleted.
+  virtual void instructionDeleted(Instruction *I) const {}
+
+  /// \brief Called to update debug info associated with the instruction.
+  virtual void updateDebugInfo(Instruction *I) const {}
+};
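+
+/// Example subclass (illustrative sketch): promote a fixed set of loads and
+/// stores, reacting as each instruction is deleted.
+/// \code
+///   class MyPromoter : public LoadAndStorePromoter {
+///   public:
+///     MyPromoter(ArrayRef<const Instruction *> Insts, SSAUpdater &S)
+///         : LoadAndStorePromoter(Insts, S, "mypromote") {}
+///     void instructionDeleted(Instruction *I) const override {
+///       // E.g. drop I from any client-side bookkeeping.
+///     }
+///   };
+/// \endcode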
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 0000000..3c8bd17
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,469 @@
+//===- SSAUpdaterImpl.h - SSA Updater Implementation ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "ssaupdater"
+
+namespace llvm {
+
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+  UpdaterT *Updater;
+
+  using Traits = SSAUpdaterTraits<UpdaterT>;
+  using BlkT = typename Traits::BlkT;
+  using ValT = typename Traits::ValT;
+  using PhiT = typename Traits::PhiT;
+
+  /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+  /// The predecessors of each block are cached here since pred_iterator is
+  /// slow and we need to iterate over the blocks at least a few times.
+  class BBInfo {
+  public:
+    // Back-pointer to the corresponding block.
+    BlkT *BB;
+
+    // Value to use in this block.
+    ValT AvailableVal;
+
+    // Block that defines the available value.
+    BBInfo *DefBB;
+
+    // Postorder number.
+    int BlkNum = 0;
+
+    // Immediate dominator.
+    BBInfo *IDom = nullptr;
+
+    // Number of predecessor blocks.
+    unsigned NumPreds = 0;
+
+    // Array[NumPreds] of predecessor blocks.
+    BBInfo **Preds = nullptr;
+
+    // Marker for existing PHIs that match.
+    PhiT *PHITag = nullptr;
+
+    BBInfo(BlkT *ThisBB, ValT V)
+      : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr) {}
+  };
+
+  using AvailableValsTy = DenseMap<BlkT *, ValT>;
+
+  AvailableValsTy *AvailableVals;
+
+  SmallVectorImpl<PhiT *> *InsertedPHIs;
+
+  using BlockListTy = SmallVectorImpl<BBInfo *>;
+  using BBMapTy = DenseMap<BlkT *, BBInfo *>;
+
+  BBMapTy BBMap;
+  BumpPtrAllocator Allocator;
+
+public:
+  explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+                          SmallVectorImpl<PhiT *> *Ins) :
+    Updater(U), AvailableVals(A), InsertedPHIs(Ins) {}
+
+  /// GetValue - Check to see if AvailableVals has an entry for the specified
+  /// BB and if so, return it.  If not, construct SSA form by first
+  /// calculating the required placement of PHIs and then inserting new PHIs
+  /// where needed.
+  ValT GetValue(BlkT *BB) {
+    SmallVector<BBInfo *, 100> BlockList;
+    BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+    // Special case: bail out if BB is unreachable.
+    if (BlockList.size() == 0) {
+      ValT V = Traits::GetUndefVal(BB, Updater);
+      (*AvailableVals)[BB] = V;
+      return V;
+    }
+
+    FindDominators(&BlockList, PseudoEntry);
+    FindPHIPlacement(&BlockList);
+    FindAvailableVals(&BlockList);
+
+    return BBMap[BB]->DefBB->AvailableVal;
+  }
+
+  /// BuildBlockList - Starting from the specified basic block, traverse back
+  /// through its predecessors until reaching blocks with known values.
+  /// Create BBInfo structures for the blocks and append them to the block
+  /// list.
+  BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+    SmallVector<BBInfo *, 10> RootList;
+    SmallVector<BBInfo *, 64> WorkList;
+
+    BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+    BBMap[BB] = Info;
+    WorkList.push_back(Info);
+
+    // Search backward from BB, creating BBInfos along the way and stopping
+    // when reaching blocks that define the value.  Record those defining
+    // blocks on the RootList.
+    SmallVector<BlkT *, 10> Preds;
+    while (!WorkList.empty()) {
+      Info = WorkList.pop_back_val();
+      Preds.clear();
+      Traits::FindPredecessorBlocks(Info->BB, &Preds);
+      Info->NumPreds = Preds.size();
+      if (Info->NumPreds == 0)
+        Info->Preds = nullptr;
+      else
+        Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
+            Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));
+
+      for (unsigned p = 0; p != Info->NumPreds; ++p) {
+        BlkT *Pred = Preds[p];
+        // Check if BBMap already has a BBInfo for the predecessor block.
+        typename BBMapTy::value_type &BBMapBucket =
+          BBMap.FindAndConstruct(Pred);
+        if (BBMapBucket.second) {
+          Info->Preds[p] = BBMapBucket.second;
+          continue;
+        }
+
+        // Create a new BBInfo for the predecessor.
+        ValT PredVal = AvailableVals->lookup(Pred);
+        BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+        BBMapBucket.second = PredInfo;
+        Info->Preds[p] = PredInfo;
+
+        if (PredInfo->AvailableVal) {
+          RootList.push_back(PredInfo);
+          continue;
+        }
+        WorkList.push_back(PredInfo);
+      }
+    }
+
+    // Now that we know what blocks are backwards-reachable from the starting
+    // block, do a forward depth-first traversal to assign postorder numbers
+    // to those blocks.
+    BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
+    unsigned BlkNum = 1;
+
+    // Initialize the worklist with the roots from the backward traversal.
+    while (!RootList.empty()) {
+      Info = RootList.pop_back_val();
+      Info->IDom = PseudoEntry;
+      Info->BlkNum = -1;
+      WorkList.push_back(Info);
+    }
+
+    while (!WorkList.empty()) {
+      Info = WorkList.back();
+
+      if (Info->BlkNum == -2) {
+        // All the successors have been handled; assign the postorder number.
+        Info->BlkNum = BlkNum++;
+        // If not a root, put it on the BlockList.
+        if (!Info->AvailableVal)
+          BlockList->push_back(Info);
+        WorkList.pop_back();
+        continue;
+      }
+
+      // Leave this entry on the worklist, but set its BlkNum to mark that its
+      // successors have been put on the worklist.  When it returns to the top
+      // of the list, after handling its successors, it will be assigned a
+      // number.
+      Info->BlkNum = -2;
+
+      // Add unvisited successors to the work list.
+      for (typename Traits::BlkSucc_iterator SI =
+             Traits::BlkSucc_begin(Info->BB),
+             E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+        BBInfo *SuccInfo = BBMap[*SI];
+        if (!SuccInfo || SuccInfo->BlkNum)
+          continue;
+        SuccInfo->BlkNum = -1;
+        WorkList.push_back(SuccInfo);
+      }
+    }
+    PseudoEntry->BlkNum = BlkNum;
+    return PseudoEntry;
+  }
+
+  /// IntersectDominators - This is the dataflow lattice "meet" operation for
+  /// finding dominators.  Given two basic blocks, it walks up the dominator
+  /// tree until it finds a common dominator of both.  It uses the postorder
+  /// number of the blocks to determine how to do that.
+  BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+    while (Blk1 != Blk2) {
+      while (Blk1->BlkNum < Blk2->BlkNum) {
+        Blk1 = Blk1->IDom;
+        if (!Blk1)
+          return Blk2;
+      }
+      while (Blk2->BlkNum < Blk1->BlkNum) {
+        Blk2 = Blk2->IDom;
+        if (!Blk2)
+          return Blk1;
+      }
+    }
+    return Blk1;
+  }
+
+  /// FindDominators - Calculate the dominator tree for the subset of the CFG
+  /// corresponding to the basic blocks on the BlockList.  This uses the
+  /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+  /// and Kennedy, published in Software--Practice and Experience, 2001,
+  /// 4:1-10.  Because the CFG subset does not include any edges leading into
+  /// blocks that define the value, the results are not the usual dominator
+  /// tree.  The CFG subset has a single pseudo-entry node with edges to a set
+  /// of root nodes for blocks that define the value.  The dominators for this
+  /// subset CFG are not the standard dominators but they are adequate for
+  /// placing PHIs within the subset CFG.
+  void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+    bool Changed;
+    do {
+      Changed = false;
+      // Iterate over the list in reverse order, i.e., forward on CFG edges.
+      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+             E = BlockList->rend(); I != E; ++I) {
+        BBInfo *Info = *I;
+        BBInfo *NewIDom = nullptr;
+
+        // Iterate through the block's predecessors.
+        for (unsigned p = 0; p != Info->NumPreds; ++p) {
+          BBInfo *Pred = Info->Preds[p];
+
+          // Treat an unreachable predecessor as a definition with 'undef'.
+          if (Pred->BlkNum == 0) {
+            Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+            (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+            Pred->DefBB = Pred;
+            Pred->BlkNum = PseudoEntry->BlkNum;
+            PseudoEntry->BlkNum++;
+          }
+
+          if (!NewIDom)
+            NewIDom = Pred;
+          else
+            NewIDom = IntersectDominators(NewIDom, Pred);
+        }
+
+        // Check if the IDom value has changed.
+        if (NewIDom && NewIDom != Info->IDom) {
+          Info->IDom = NewIDom;
+          Changed = true;
+        }
+      }
+    } while (Changed);
+  }
+
+  /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+  /// any blocks containing definitions of the value.  If one is found, then
+  /// the successor of Pred is in the dominance frontier for the definition,
+  /// and this function returns true.
+  bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+    for (; Pred != IDom; Pred = Pred->IDom) {
+      if (Pred->DefBB == Pred)
+        return true;
+    }
+    return false;
+  }
+
+  /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+  /// of the known definitions.  Iteratively add PHIs in the dom frontiers
+  /// until nothing changes.  Along the way, keep track of the nearest
+  /// dominating definitions for non-PHI blocks.
+  void FindPHIPlacement(BlockListTy *BlockList) {
+    bool Changed;
+    do {
+      Changed = false;
+      // Iterate over the list in reverse order, i.e., forward on CFG edges.
+      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+             E = BlockList->rend(); I != E; ++I) {
+        BBInfo *Info = *I;
+
+        // If this block already needs a PHI, there is nothing to do here.
+        if (Info->DefBB == Info)
+          continue;
+
+        // Default to use the same def as the immediate dominator.
+        BBInfo *NewDefBB = Info->IDom->DefBB;
+        for (unsigned p = 0; p != Info->NumPreds; ++p) {
+          if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+            // Need a PHI here.
+            NewDefBB = Info;
+            break;
+          }
+        }
+
+        // Check if anything changed.
+        if (NewDefBB != Info->DefBB) {
+          Info->DefBB = NewDefBB;
+          Changed = true;
+        }
+      }
+    } while (Changed);
+  }
+
+  /// FindAvailableVal - If this block requires a PHI, first check if an
+  /// existing PHI matches the PHI placement and reaching definitions computed
+  /// earlier, and if not, create a new PHI.  Visit all the block's
+  /// predecessors to calculate the available value for each one and fill in
+  /// the incoming values for a new PHI.
+  void FindAvailableVals(BlockListTy *BlockList) {
+    // Go through the worklist in forward order (i.e., backward through the CFG)
+    // and check if existing PHIs can be used.  If not, create empty PHIs where
+    // they are needed.
+    for (typename BlockListTy::iterator I = BlockList->begin(),
+           E = BlockList->end(); I != E; ++I) {
+      BBInfo *Info = *I;
+      // Check if there needs to be a PHI in BB.
+      if (Info->DefBB != Info)
+        continue;
+
+      // Look for an existing PHI.
+      FindExistingPHI(Info->BB, BlockList);
+      if (Info->AvailableVal)
+        continue;
+
+      ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+      Info->AvailableVal = PHI;
+      (*AvailableVals)[Info->BB] = PHI;
+    }
+
+    // Now go back through the worklist in reverse order to fill in the
+    // arguments for any new PHIs added in the forward traversal.
+    for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+           E = BlockList->rend(); I != E; ++I) {
+      BBInfo *Info = *I;
+
+      if (Info->DefBB != Info) {
+        // Record the available value at join nodes to speed up subsequent
+        // uses of this SSAUpdater for the same value.
+        if (Info->NumPreds > 1)
+          (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+        continue;
+      }
+
+      // Check if this block contains a newly added PHI.
+      PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+      if (!PHI)
+        continue;
+
+      // Iterate through the block's predecessors.
+      for (unsigned p = 0; p != Info->NumPreds; ++p) {
+        BBInfo *PredInfo = Info->Preds[p];
+        BlkT *Pred = PredInfo->BB;
+        // Skip to the nearest preceding definition.
+        if (PredInfo->DefBB != PredInfo)
+          PredInfo = PredInfo->DefBB;
+        Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+      }
+
+      DEBUG(dbgs() << "  Inserted PHI: " << *PHI << "\n");
+
+      // If the client wants to know about all new instructions, tell it.
+      if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+    }
+  }
+
+  /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+  /// them match what is needed.
+  void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+    for (auto &SomePHI : BB->phis()) {
+      if (CheckIfPHIMatches(&SomePHI)) {
+        RecordMatchingPHIs(BlockList);
+        break;
+      }
+      // Match failed: clear all the PHITag values.
+      for (typename BlockListTy::iterator I = BlockList->begin(),
+             E = BlockList->end(); I != E; ++I)
+        (*I)->PHITag = nullptr;
+    }
+  }
+
+  /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+  /// in the BBMap.
+  bool CheckIfPHIMatches(PhiT *PHI) {
+    SmallVector<PhiT *, 20> WorkList;
+    WorkList.push_back(PHI);
+
+    // Mark that the block containing this PHI has been visited.
+    BBMap[PHI->getParent()]->PHITag = PHI;
+
+    while (!WorkList.empty()) {
+      PHI = WorkList.pop_back_val();
+
+      // Iterate through the PHI's incoming values.
+      for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+             E = Traits::PHI_end(PHI); I != E; ++I) {
+        ValT IncomingVal = I.getIncomingValue();
+        BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+        // Skip to the nearest preceding definition.
+        if (PredInfo->DefBB != PredInfo)
+          PredInfo = PredInfo->DefBB;
+
+        // Check if it matches the expected value.
+        if (PredInfo->AvailableVal) {
+          if (IncomingVal == PredInfo->AvailableVal)
+            continue;
+          return false;
+        }
+
+        // Check if the value is a PHI in the correct block.
+        PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+        if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+          return false;
+
+        // If this block has already been visited, check if this PHI matches.
+        if (PredInfo->PHITag) {
+          if (IncomingPHIVal == PredInfo->PHITag)
+            continue;
+          return false;
+        }
+        PredInfo->PHITag = IncomingPHIVal;
+
+        WorkList.push_back(IncomingPHIVal);
+      }
+    }
+    return true;
+  }
+
+  /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+  /// the BBMap and the AvailableVals mapping.
+  void RecordMatchingPHIs(BlockListTy *BlockList) {
+    for (typename BlockListTy::iterator I = BlockList->begin(),
+           E = BlockList->end(); I != E; ++I)
+      if (PhiT *PHI = (*I)->PHITag) {
+        BlkT *BB = PHI->getParent();
+        ValT PHIVal = Traits::GetPHIValue(PHI);
+        (*AvailableVals)[BB] = PHIVal;
+        BBMap[BB]->AvailableVal = PHIVal;
+      }
+  }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "ssaupdater"
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h b/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h
new file mode 100644
index 0000000..d36e342
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SanitizerStats.h
@@ -0,0 +1,56 @@
+//===- SanitizerStats.h - Sanitizer statistics gathering  -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares functions and data structures for sanitizer statistics gathering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+#define LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+// Number of bits in data that are used for the sanitizer kind. Needs to match
+// __sanitizer::kKindBits in compiler-rt/lib/stats/stats.h
+enum { kSanitizerStatKindBits = 3 };
+
+enum SanitizerStatKind {
+  SanStat_CFI_VCall,
+  SanStat_CFI_NVCall,
+  SanStat_CFI_DerivedCast,
+  SanStat_CFI_UnrelatedCast,
+  SanStat_CFI_ICall,
+};
+
+struct SanitizerStatReport {
+  SanitizerStatReport(Module *M);
+
+  /// Generates code into B that increments a location-specific counter tagged
+  /// with the given sanitizer kind SK.
+  void create(IRBuilder<> &B, SanitizerStatKind SK);
+
+  /// Finalize module stats array and add global constructor to register it.
+  void finish();
+
+private:
+  Module *M;
+  GlobalVariable *ModuleStatsGV;
+  ArrayType *StatTy;
+  StructType *EmptyModuleStatsTy;
+
+  std::vector<Constant *> Inits;
+  ArrayType *makeModuleStatsArrayTy();
+  StructType *makeModuleStatsTy();
+};
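+
+// Typical flow (illustrative sketch; M, the IRBuilder B, and the insertion
+// point are assumed): instrument interesting sites during a module pass, then
+// emit the stats globals once at the end.
+//
+//   SanitizerStatReport SSR(&M);
+//   SSR.create(B, SanStat_CFI_VCall); // at each instrumented site
+//   SSR.finish();                     // once, after all sites are visited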
+
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
new file mode 100644
index 0000000..a1dfed2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -0,0 +1,60 @@
+//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for induction variable simplification. It does
+// not define any actual pass or policy, but provides a single function to
+// simplify a loop's induction variables based on ScalarEvolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class CastInst;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class PHINode;
+class ScalarEvolution;
+class SCEVExpander;
+
+/// Interface for visiting interesting IV users that are recognized but not
+/// simplified by this utility.
+class IVVisitor {
+protected:
+  const DominatorTree *DT = nullptr;
+
+  virtual void anchor();
+
+public:
+  IVVisitor() = default;
+  virtual ~IVVisitor() = default;
+
+  const DominatorTree *getDomTree() const { return DT; }
+  virtual void visitCast(CastInst *Cast) = 0;
+};
+
+/// simplifyUsersOfIV - Simplify instructions that use this induction variable
+/// by using ScalarEvolution to analyze the IV's recurrence.
+bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
+                       LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
+                       SCEVExpander &Rewriter, IVVisitor *V = nullptr);
+
+/// SimplifyLoopIVs - Simplify users of induction variables within this
+/// loop. This does not actually change or add IVs.
+bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+                     LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead);
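+
+/// Example (illustrative sketch; L, SE, DT, and LI are assumed): simplify the
+/// IV users of a loop, then clean up instructions that became dead.
+/// \code
+///   SmallVector<WeakTrackingVH, 16> DeadInsts;
+///   bool Changed = simplifyLoopIVs(L, SE, DT, LI, DeadInsts);
+///   while (!DeadInsts.empty())
+///     if (auto *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
+///       RecursivelyDeleteTriviallyDeadInstructions(I);
+/// \endcode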
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h
new file mode 100644
index 0000000..3f83861
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyInstructions.h
@@ -0,0 +1,31 @@
+//===- SimplifyInstructions.h - Remove redundant instructions ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility pass used for testing the InstructionSimplify analysis.
+// The analysis is applied to every instruction, and if it simplifies then the
+// instruction is replaced by the simplification.  If you are looking for a pass
+// that performs serious instruction folding, use the instcombine pass instead.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// This pass removes redundant instructions.
+class InstSimplifierPass : public PassInfoMixin<InstSimplifierPass> {
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
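+
+// Example (illustrative sketch; FAM is an initialized
+// FunctionAnalysisManager): add the pass to a new-PM function pipeline.
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(InstSimplifierPass());
+//   FPM.run(F, FAM);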
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINSTRUCTIONS_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 0000000..73a62f5
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,183 @@
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class StringRef;
+class Value;
+class CallInst;
+class DataLayout;
+class Instruction;
+class TargetLibraryInfo;
+class BasicBlock;
+class Function;
+class OptimizationRemarkEmitter;
+
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
+/// replacing them with their non-checking counterparts when possible.
+/// Other optimizations can also be done, but it is possible to disable them
+/// and only simplify needless uses of the checking versions (when the object
+/// size is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+  const TargetLibraryInfo *TLI;
+  bool OnlyLowerUnknownSize;
+
+public:
+  FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
+                             bool OnlyLowerUnknownSize = false);
+
+  /// \brief Take the given call instruction and return a more
+  /// optimal value to replace the instruction with or 0 if a more
+  /// optimal form can't be found.
+  /// The call must not be an indirect call.
+  Value *optimizeCall(CallInst *CI);
+
+private:
+  Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+  // Str/Stp cpy are similar enough to be handled in the same functions.
+  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
+  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc Func);
+
+  /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+  /// to the non-fortified version.
+  bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+                               unsigned SizeOp, bool isString);
+};
+
+/// LibCallSimplifier - This class implements a collection of optimizations
+/// that replace well-formed calls to library functions with a more optimal
+/// form.  For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
+class LibCallSimplifier {
+private:
+  FortifiedLibCallSimplifier FortifiedSimplifier;
+  const DataLayout &DL;
+  const TargetLibraryInfo *TLI;
+  OptimizationRemarkEmitter &ORE;
+  bool UnsafeFPShrink;
+  function_ref<void(Instruction *, Value *)> Replacer;
+
+  /// \brief Internal wrapper for RAUW that is the default implementation.
+  ///
+  /// Other users may provide an alternate function with this signature instead
+  /// of this one.
+  static void replaceAllUsesWithDefault(Instruction *I, Value *With);
+
+  /// \brief Replace an instruction's uses with a value using our replacer.
+  void replaceAllUsesWith(Instruction *I, Value *With);
+
+public:
+  LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
+                    OptimizationRemarkEmitter &ORE,
+                    function_ref<void(Instruction *, Value *)> Replacer =
+                        &replaceAllUsesWithDefault);
+
+  /// optimizeCall - Take the given call instruction and return a more
+  /// optimal value to replace the instruction with or 0 if a more
+  /// optimal form can't be found.  Note that the returned value may
+  /// be equal to the instruction being optimized.  In this case all
+  /// other instructions that use the given instruction were modified
+  /// and the given instruction is dead.
+  /// The call must not be an indirect call.
+  Value *optimizeCall(CallInst *CI);
+
+private:
+  // String and Memory Library Call Optimizations
+  Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeWcslen(CallInst *CI, IRBuilder<> &B);
+  // Wrapper for all String/Memory Library Call Optimizations
+  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
+
+  // Math Library Optimizations
+  Value *optimizeCAbs(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePow(CallInst *CI, IRBuilder<> &B);
+  Value *replacePowWithSqrt(CallInst *Pow, IRBuilder<> &B);
+  Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeLog(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+  // Wrapper for all floating point library call optimizations
+  Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
+                                      IRBuilder<> &B);
+
+  // Integer Library Call Optimizations
+  Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFls(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);
+
+  // Formatting and IO Library Call Optimizations
+  Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
+                                int StreamArg = -1);
+  Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
+  Value *optimizePuts(CallInst *CI, IRBuilder<> &B);
+
+  // Helper methods
+  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B);
+  void classifyArgUse(Value *Val, Function *F, bool IsFloat,
+                      SmallVectorImpl<CallInst *> &SinCalls,
+                      SmallVectorImpl<CallInst *> &CosCalls,
+                      SmallVectorImpl<CallInst *> &SinCosCalls);
+  Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
+  Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
+
+  /// hasFloatVersion - Checks if there is a float version of the specified
+  /// function by checking for an existing function with name FuncName + 'f'.
+  bool hasFloatVersion(StringRef FuncName);
+
+  /// Shared code to optimize strlen+wcslen.
+  Value *optimizeStringLength(CallInst *CI, IRBuilder<> &B, unsigned CharSize);
+};
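+
+// Example driver (illustrative sketch; DL, TLI, ORE, and the call CI are
+// assumed to be in scope):
+//
+//   LibCallSimplifier Simplifier(DL, &TLI, ORE);
+//   if (Value *With = Simplifier.optimizeCall(CI)) {
+//     // With may be CI itself (see optimizeCall above); only replace when a
+//     // genuinely new value was produced.
+//     if (With != CI) {
+//       CI->replaceAllUsesWith(With);
+//       CI->eraseFromParent();
+//     }
+//   }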
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h b/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h
new file mode 100644
index 0000000..d2c31f2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SplitModule.h
@@ -0,0 +1,43 @@
+//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function llvm::SplitModule, which splits a module
+// into multiple linkable partitions. It can be used to implement parallel code
+// generation for link-time optimization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+///
+/// FIXME: This function does not deal with the somewhat subtle symbol
+/// visibility issues around module splitting, including (but not limited to):
+///
+/// - Internal symbols should not collide with symbols defined outside the
+///   module.
+/// - Internal symbols defined in module-level inline asm should be visible to
+///   each partition.
+void SplitModule(
+    std::unique_ptr<Module> M, unsigned N,
+    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
+    bool PreserveLocals = false);
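+
+/// Example (illustrative sketch): split M into four partitions and queue each
+/// one for code generation (CodeGenQueue is an assumed client-side container).
+/// \code
+///   SplitModule(std::move(M), 4, [&](std::unique_ptr<Module> MPart) {
+///     CodeGenQueue.push_back(std::move(MPart));
+///   });
+/// \endcode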
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h b/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h
new file mode 100644
index 0000000..e0caf77
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -0,0 +1,142 @@
+//===- SymbolRewriter.h - Symbol Rewriting Pass -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the prototypes and definitions related to the Symbol
+// Rewriter pass.
+//
+// The Symbol Rewriter pass takes a set of rewrite descriptors which define
+// transformations for symbol names.  These can be either a single name-to-name
+// transformation or broader regular-expression-based transformations.
+//
+// All the functions are re-written at the IR level.  The Symbol Rewriter itself
+// is exposed as a module level pass.  All symbols at the module level are
+// iterated.  For any matching symbol, the requested transformation is applied,
+// updating references to it as well (a la RAUW).  The resulting binary will
+// only contain the rewritten symbols.
+//
+// By performing this operation in the compiler, we are able to catch symbols
+// that would otherwise not be possible to catch (e.g. inlined symbols).
+//
+// This makes it possible to cleanly transform symbols without resorting to
+// overly-complex macro tricks and the pre-processor.  An example of where this
+// is useful is the sanitizers where we would like to intercept a well-defined
+// set of functions across the module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+
+#include "llvm/IR/PassManager.h"
+#include <list>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class MemoryBuffer;
+class Module;
+class ModulePass;
+
+namespace yaml {
+
+class KeyValueNode;
+class MappingNode;
+class ScalarNode;
+class Stream;
+
+} // end namespace yaml
+
+namespace SymbolRewriter {
+
+/// The basic entity representing a rewrite operation.  It serves as the base
+/// class for any rewrite descriptor.  It has a certain set of specializations
+/// which describe a particular rewrite.
+///
+/// The RewriteMapParser can be used to parse a mapping file that provides the
+/// mapping for rewriting the symbols.  The descriptors individually describe
+/// whether to rewrite a function, global variable, or global alias.  Each of
+/// these can be selected either by explicitly providing a name for the ones to
+/// be rewritten or providing a (posix compatible) regular expression that will
+/// select the symbols to rewrite.  This descriptor list is passed to the
+/// SymbolRewriter pass.
+class RewriteDescriptor {
+public:
+  enum class Type {
+    Invalid,        /// invalid
+    Function,       /// function - descriptor rewrites a function
+    GlobalVariable, /// global variable - descriptor rewrites a global variable
+    NamedAlias,     /// named alias - descriptor rewrites a global alias
+  };
+
+  RewriteDescriptor(const RewriteDescriptor &) = delete;
+  RewriteDescriptor &operator=(const RewriteDescriptor &) = delete;
+  virtual ~RewriteDescriptor() = default;
+
+  Type getType() const { return Kind; }
+
+  virtual bool performOnModule(Module &M) = 0;
+
+protected:
+  explicit RewriteDescriptor(Type T) : Kind(T) {}
+
+private:
+  const Type Kind;
+};
+
+using RewriteDescriptorList = std::list<std::unique_ptr<RewriteDescriptor>>;
+
+class RewriteMapParser {
+public:
+  bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);
+
+private:
+  bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
+  bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
+                  RewriteDescriptorList *DL);
+  bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
+                                      yaml::ScalarNode *Key,
+                                      yaml::MappingNode *Value,
+                                      RewriteDescriptorList *DL);
+  bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
+                                            yaml::ScalarNode *Key,
+                                            yaml::MappingNode *Value,
+                                            RewriteDescriptorList *DL);
+  bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
+                                         yaml::MappingNode *V,
+                                         RewriteDescriptorList *DL);
+};
+
+} // end namespace SymbolRewriter
+
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
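+
+// Example (illustrative sketch; "rewrite.map" is an assumed file name and PM
+// an existing legacy pass manager):
+//
+//   SymbolRewriter::RewriteDescriptorList Descriptors;
+//   SymbolRewriter::RewriteMapParser Parser;
+//   if (Parser.parse("rewrite.map", &Descriptors))
+//     PM.add(createRewriteSymbolsPass(Descriptors));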
+
+class RewriteSymbolPass : public PassInfoMixin<RewriteSymbolPass> {
+public:
+  RewriteSymbolPass() { loadAndParseMapFiles(); }
+
+  RewriteSymbolPass(SymbolRewriter::RewriteDescriptorList &DL) {
+    Descriptors.splice(Descriptors.begin(), DL);
+  }
+
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+  // Glue for old PM
+  bool runImpl(Module &M);
+
+private:
+  void loadAndParseMapFiles();
+
+  SymbolRewriter::RewriteDescriptorList Descriptors;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
new file mode 100644
index 0000000..222c601
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -0,0 +1,54 @@
+//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to ensure that functions have at most one return and one
+// unwind instruction in them.  Additionally, it keeps track of which node is
+// the new exit node of the CFG.  If there are no return or unwind instructions
+// in the function, the getReturnBlock/getUnwindBlock methods will return a null
+// pointer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
+
+namespace llvm {
+
+struct UnifyFunctionExitNodes : public FunctionPass {
+  BasicBlock *ReturnBlock = nullptr;
+  BasicBlock *UnwindBlock = nullptr;
+  BasicBlock *UnreachableBlock;
+
+public:
+  static char ID; // Pass identification, replacement for typeid
+  UnifyFunctionExitNodes() : FunctionPass(ID) {
+    initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+  }
+
+  // We can preserve non-critical-edgeness when we unify function exit nodes
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+  // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent)
+  // return, unwind, or unreachable basic block in the CFG.
+  //
+  BasicBlock *getReturnBlock() const { return ReturnBlock; }
+  BasicBlock *getUnwindBlock() const { return UnwindBlock; }
+  BasicBlock *getUnreachableBlock() const { return UnreachableBlock; }
+
+  bool runOnFunction(Function &F) override;
+};
+
+Pass *createUnifyFunctionExitNodesPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
new file mode 100644
index 0000000..3983637
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -0,0 +1,83 @@
+//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop unrolling utilities. It does not define any
+// actual pass or policy, but provides a single function to perform loop
+// unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class MDNode;
+class OptimizationRemarkEmitter;
+class ScalarEvolution;
+
+using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
+
+const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
+                                     BasicBlock *ClonedBB, LoopInfo *LI,
+                                     NewLoopsMap &NewLoops);
+
+/// Represents the result of a \c UnrollLoop invocation.
+enum class LoopUnrollResult {
+  /// The loop was not modified.
+  Unmodified,
+
+  /// The loop was partially unrolled -- we still have a loop, but with a
+  /// smaller trip count.  We may also have emitted an epilogue loop if the
+  /// loop had a non-constant trip count.
+  PartiallyUnrolled,
+
+  /// The loop was fully unrolled into straight-line code.  We no longer have
+  /// any back-edges.
+  FullyUnrolled
+};
+
+LoopUnrollResult UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
+                            bool Force, bool AllowRuntime,
+                            bool AllowExpensiveTripCount, bool PreserveCondBr,
+                            bool PreserveOnlyFirst, unsigned TripMultiple,
+                            unsigned PeelCount, bool UnrollRemainder,
+                            LoopInfo *LI, ScalarEvolution *SE,
+                            DominatorTree *DT, AssumptionCache *AC,
+                            OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
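+
+// Example invocation (illustrative sketch; L, LI, SE, DT, AC, and ORE are
+// assumed to be in scope): fully unroll a loop with a known trip count of 4.
+//
+//   LoopUnrollResult R = UnrollLoop(
+//       L, /*Count=*/4, /*TripCount=*/4, /*Force=*/false,
+//       /*AllowRuntime=*/false, /*AllowExpensiveTripCount=*/false,
+//       /*PreserveCondBr=*/false, /*PreserveOnlyFirst=*/false,
+//       /*TripMultiple=*/1, /*PeelCount=*/0, /*UnrollRemainder=*/false,
+//       LI, SE, DT, AC, &ORE, /*PreserveLCSSA=*/true);
+//   if (R == LoopUnrollResult::FullyUnrolled)
+//     ; // L has been deleted; do not touch it again.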
+
+bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
+                                bool AllowExpensiveTripCount,
+                                bool UseEpilogRemainder, bool UnrollRemainder,
+                                LoopInfo *LI,
+                                ScalarEvolution *SE, DominatorTree *DT,
+                                AssumptionCache *AC,
+                                bool PreserveLCSSA);
+
+void computePeelCount(Loop *L, unsigned LoopSize,
+                      TargetTransformInfo::UnrollingPreferences &UP,
+                      unsigned &TripCount, ScalarEvolution &SE);
+
+bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
+              DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
new file mode 100644
index 0000000..1baa9b6
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/VNCoercion.h
@@ -0,0 +1,108 @@
+//===- VNCoercion.h - Value Numbering Coercion Utilities --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides routines used by LLVM's value numbering passes to
+/// perform various forms of value extraction from memory when the types are
+/// not identical.  For example, given
+///
+/// store i32 8, i32* %foo
+/// %a = bitcast i32* %foo to i16*
+/// %val = load i16, i16* %a
+///
+/// It is possible to extract the value of the load of %a from the store to %foo.
+/// These routines know how to tell whether they can do that (the analyze*
+/// routines), and can also insert the necessary IR to do it (the get*
+/// routines).
+
+#ifndef LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+#define LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class Function;
+class StoreInst;
+class LoadInst;
+class MemIntrinsic;
+class Instruction;
+class Value;
+class Type;
+class DataLayout;
+namespace VNCoercion {
+/// Return true if CoerceAvailableValueToLoadType would succeed if it was
+/// called.
+bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
+                                     const DataLayout &DL);
+
+/// If we saw a store of a value to memory, and then a load from a must-aliased
+/// pointer of a different type, try to coerce the stored value to the loaded
+/// type.  LoadedTy is the type of the load we want to replace.  IRB is the
+/// IRBuilder used to insert new instructions.
+///
+/// If we can't do it, return null.
+Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+                                      IRBuilder<> &IRB, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the store at DepSI.
+///
+/// On success, it returns the offset into DepSI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
+                                   StoreInst *DepSI, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the load at DepLI.
+///
+/// On success, it returns the offset into DepLI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
+                                  const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the memory intrinsic at DepMI.
+///
+/// On success, it returns the offset into DepMI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
+                                     MemIntrinsic *DepMI, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingStore returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the store. It
+/// inserts instructions to do so at InsertPt, and returns the extracted value.
+Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
+                            Instruction *InsertPt, const DataLayout &DL);
+/// This is the same as getStoreValueForLoad, except it performs no insertion.
+/// It only allows constant inputs.
+Constant *getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset,
+                                       Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingLoad returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the load, including
+/// any necessary load widening.  It inserts instructions to do so at InsertPt,
+/// and returns the extracted value.
+Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
+                           Instruction *InsertPt, const DataLayout &DL);
+/// This is the same as getLoadValueForLoad, except it is given the load value
+/// as a constant. It returns nullptr if it would require widening the load.
+Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
+                                      Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingMemInst returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the memory
+/// intrinsic.  It inserts instructions to do so at InsertPt, and returns the
+/// extracted value.
+Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+                              Type *LoadTy, Instruction *InsertPt,
+                              const DataLayout &DL);
+/// This is the same as getMemInstValueForLoad, except it performs no
+/// insertion. It returns nullptr if it cannot produce a constant.
+Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+                                         Type *LoadTy, const DataLayout &DL);
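+
+// A minimal usage sketch (illustrative; Load, DepSI, and DL are assumed to be
+// in scope and are not part of this API): pair an analyze* routine with its
+// get* counterpart.
+//
+//   int Offset = analyzeLoadFromClobberingStore(
+//       Load->getType(), Load->getPointerOperand(), DepSI, DL);
+//   if (Offset != -1) {
+//     Value *V = getStoreValueForLoad(DepSI->getValueOperand(), Offset,
+//                                     Load->getType(), Load, DL);
+//   }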
+} // end namespace VNCoercion
+} // end namespace llvm
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
new file mode 100644
index 0000000..4ecb23e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Utils/ValueMapper.h
@@ -0,0 +1,281 @@
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MapValue interface which is used by various parts of
+// the Transforms/Utils library to implement cloning and linking facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class GlobalAlias;
+class GlobalVariable;
+class Instruction;
+class MDNode;
+class Metadata;
+class Type;
+class Value;
+
+using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
+
+/// This is a class that can be implemented by clients to remap types when
+/// cloning constants and instructions.
+class ValueMapTypeRemapper {
+  virtual void anchor(); // Out of line method.
+
+public:
+  virtual ~ValueMapTypeRemapper() = default;
+
+  /// The client should implement this method if they want to remap types while
+  /// mapping values.
+  virtual Type *remapType(Type *SrcTy) = 0;
+};
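+
+// A minimal client implementation might look like this (illustrative only):
+//
+//   struct IdentityTypeRemapper : public ValueMapTypeRemapper {
+//     Type *remapType(Type *SrcTy) override { return SrcTy; }
+//   };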
+
+/// This is a class that can be implemented by clients to materialize Values on
+/// demand.
+class ValueMaterializer {
+  virtual void anchor(); // Out of line method.
+
+protected:
+  ValueMaterializer() = default;
+  ValueMaterializer(const ValueMaterializer &) = default;
+  ValueMaterializer &operator=(const ValueMaterializer &) = default;
+  ~ValueMaterializer() = default;
+
+public:
+  /// This method can be implemented to generate a mapped Value on demand, for
+  /// example when linking lazily. Returns null if the value is not
+  /// materialized.
+  virtual Value *materialize(Value *V) = 0;
+};
+
+/// These are flags that the value mapping APIs allow.
+enum RemapFlags {
+  RF_None = 0,
+
+  /// If this flag is set, the remapper knows that only local values within a
+  /// function (such as an instruction or argument) are mapped, not global
+  /// values like functions and global metadata.
+  RF_NoModuleLevelChanges = 1,
+
+  /// If this flag is set, the remapper ignores missing function-local entries
+  /// (Argument, Instruction, BasicBlock) that are not in the value map.  If it
+  /// is unset, it aborts if an operand is asked to be remapped which doesn't
+  /// exist in the mapping.
+  ///
+  /// There are no such assertions in MapValue(), whose results are almost
+  /// unchanged by this flag.  This flag mainly changes the assertion behaviour
+  /// in RemapInstruction().
+  ///
+  /// Since an Instruction's metadata operands (even those that point to SSA
+  /// values) aren't guaranteed to be dominated by their definitions, \a
+  /// MapMetadata() will return "!{}" instead of "null" for \a LocalAsMetadata
+  /// instances whose SSA values are unmapped when this flag is set.
+  /// Otherwise, \a MapValue() completely ignores this flag.
+  ///
+  /// \a MapMetadata() always ignores this flag.
+  RF_IgnoreMissingLocals = 2,
+
+  /// Instruct the remapper to move distinct metadata instead of duplicating it
+  /// when there are module-level changes.
+  RF_MoveDistinctMDs = 4,
+
+  /// Any global values not in the value map are mapped to null instead of
+  /// mapping to self.  Illegal if RF_IgnoreMissingLocals is also set.
+  RF_NullMapMissingGlobalValues = 8,
+};
+
+inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+  return RemapFlags(unsigned(LHS) | unsigned(RHS));
+}
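+
+// Flags combine with the bitwise-or operator above; for example
+// (illustrative):
+//
+//   RemapFlags Flags = RF_NoModuleLevelChanges | RF_IgnoreMissingLocals;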
+
+/// Context for (re-)mapping values (and metadata).
+///
+/// A shared context used for mapping and remapping of Value and Metadata
+/// instances using \a ValueToValueMapTy, \a RemapFlags, \a
+/// ValueMapTypeRemapper, and \a ValueMaterializer.
+///
+/// There are a number of top-level entry points:
+/// - \a mapValue() (and \a mapConstant());
+/// - \a mapMetadata() (and \a mapMDNode());
+/// - \a remapInstruction(); and
+/// - \a remapFunction().
+///
+/// The \a ValueMaterializer can be used as a callback, but cannot invoke any
+/// of these top-level functions recursively.  Instead, callbacks should use
+/// one of the following to schedule work lazily in the \a ValueMapper
+/// instance:
+/// - \a scheduleMapGlobalInitializer()
+/// - \a scheduleMapAppendingVariable()
+/// - \a scheduleMapGlobalAliasee()
+/// - \a scheduleRemapFunction()
+///
+/// Sometimes a callback needs a different mapping context.  Such a context can
+/// be registered using \a registerAlternateMappingContext(), which takes an
+/// alternate \a ValueToValueMapTy and \a ValueMaterializer and returns an ID to
+/// pass into the schedule*() functions.
+///
+/// TODO: lib/Linker really doesn't need the \a ValueHandle in the \a
+/// ValueToValueMapTy.  We should template \a ValueMapper (and its
+/// implementation classes), and explicitly instantiate on two concrete
+/// instances of \a ValueMap (one as \a ValueToValueMap, and one with raw \a
+/// Value pointers).  It may be viable to do away with \a TrackingMDRef in the
+/// \a Metadata side map for the lib/Linker case as well, in which case we'll
+/// need a new template parameter on \a ValueMap.
+///
+/// TODO: Update callers of \a RemapInstruction() and \a MapValue() (etc.) to
+/// use \a ValueMapper directly.
+class ValueMapper {
+  void *pImpl;
+
+public:
+  ValueMapper(ValueToValueMapTy &VM, RemapFlags Flags = RF_None,
+              ValueMapTypeRemapper *TypeMapper = nullptr,
+              ValueMaterializer *Materializer = nullptr);
+  ValueMapper(ValueMapper &&) = delete;
+  ValueMapper(const ValueMapper &) = delete;
+  ValueMapper &operator=(ValueMapper &&) = delete;
+  ValueMapper &operator=(const ValueMapper &) = delete;
+  ~ValueMapper();
+
+  /// Register an alternate mapping context.
+  ///
+  /// Returns a MappingContextID that can be used with the various schedule*()
+  /// APIs to switch in a different value map on the fly.
+  unsigned
+  registerAlternateMappingContext(ValueToValueMapTy &VM,
+                                  ValueMaterializer *Materializer = nullptr);
+
+  /// Add to the current \a RemapFlags.
+  ///
+  /// \note Like the top-level mapping functions, \a addFlags() must be called
+  /// at the top level, not during a callback in a \a ValueMaterializer.
+  void addFlags(RemapFlags Flags);
+
+  Metadata *mapMetadata(const Metadata &MD);
+  MDNode *mapMDNode(const MDNode &N);
+
+  Value *mapValue(const Value &V);
+  Constant *mapConstant(const Constant &C);
+
+  void remapInstruction(Instruction &I);
+  void remapFunction(Function &F);
+
+  void scheduleMapGlobalInitializer(GlobalVariable &GV, Constant &Init,
+                                    unsigned MappingContextID = 0);
+  void scheduleMapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
+                                    bool IsOldCtorDtor,
+                                    ArrayRef<Constant *> NewMembers,
+                                    unsigned MappingContextID = 0);
+  void scheduleMapGlobalAliasee(GlobalAlias &GA, Constant &Aliasee,
+                                unsigned MappingContextID = 0);
+  void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
+};
+
+/// Look up or compute a value in the value map.
+///
+/// Return a mapped value for a function-local value (Argument, Instruction,
+/// BasicBlock), or compute and memoize a value for a Constant.
+///
+///  1. If \c V is in VM, return the result.
+///  2. Else if \c V can be materialized with \c Materializer, do so, memoize
+///     it in \c VM, and return it.
+///  3. Else if \c V is a function-local value, return nullptr.
+///  4. Else if \c V is a \a GlobalValue, return \c nullptr or \c V depending
+///     on \a RF_NullMapMissingGlobalValues.
+///  5. Else if \c V is a \a MetadataAsValue wrapping a LocalAsMetadata,
+///     recurse on the local SSA value, and return nullptr or "metadata !{}" on
+///     missing depending on \a RF_IgnoreMissingLocals.
+///  6. Else if \c V is a \a MetadataAsValue, rewrap the return of \a
+///     MapMetadata().
+///  7. Else, compute the equivalent constant, and return it.
+inline Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+                       RemapFlags Flags = RF_None,
+                       ValueMapTypeRemapper *TypeMapper = nullptr,
+                       ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapValue(*V);
+}
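+
+// A minimal usage sketch (illustrative; OldV and NewV are assumed to be
+// Values already in scope):
+//
+//   ValueToValueMapTy VMap;
+//   VMap[OldV] = NewV;
+//   Value *Mapped = MapValue(OldV, VMap); // yields NewV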
+
+/// Lookup or compute a mapping for a piece of metadata.
+///
+/// Compute and memoize a mapping for \c MD.
+///
+///  1. If \c MD is mapped, return it.
+///  2. Else if \a RF_NoModuleLevelChanges or \c MD is an \a MDString, return
+///     \c MD.
+///  3. Else if \c MD is a \a ConstantAsMetadata, call \a MapValue() and
+///     re-wrap its return (returning nullptr on nullptr).
+///  4. Else, \c MD is an \a MDNode.  These are remapped, along with their
+///     transitive operands.  Distinct nodes are duplicated or moved depending
+///     on \a RF_MoveDistinctMDs.  Uniqued nodes are remapped like constants.
+///
+/// \note \a LocalAsMetadata is completely unsupported by \a MapMetadata.
+/// Instead, use \a MapValue() with its wrapping \a MetadataAsValue instance.
+inline Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+                             RemapFlags Flags = RF_None,
+                             ValueMapTypeRemapper *TypeMapper = nullptr,
+                             ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMetadata(*MD);
+}
+
+/// Version of MapMetadata with type safety for MDNode.
+inline MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+                           RemapFlags Flags = RF_None,
+                           ValueMapTypeRemapper *TypeMapper = nullptr,
+                           ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMDNode(*MD);
+}
+
+/// Convert the instruction operands from referencing the current values into
+/// those specified by VM.
+///
+/// If \a RF_IgnoreMissingLocals is set and an operand can't be found via \a
+/// MapValue(), use the old value.  Otherwise assert that this doesn't happen.
+///
+/// Note that \a MapValue() only returns \c nullptr for SSA values missing from
+/// \c VM.
+inline void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+                             RemapFlags Flags = RF_None,
+                             ValueMapTypeRemapper *TypeMapper = nullptr,
+                             ValueMaterializer *Materializer = nullptr) {
+  ValueMapper(VM, Flags, TypeMapper, Materializer).remapInstruction(*I);
+}
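+
+// A typical cloning sequence (illustrative sketch; I and VMap are assumed to
+// be in scope):
+//
+//   Instruction *NewI = I->clone();
+//   VMap[I] = NewI;
+//   RemapInstruction(NewI, VMap, RF_IgnoreMissingLocals);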
+
+/// Remap the operands, metadata, arguments, and instructions of a function.
+///
+/// Calls \a MapValue() on prefix data, prologue data, and personality
+/// function; calls \a MapMetadata() on each attached MDNode; remaps the
+/// argument types using the provided \c TypeMapper; and calls \a
+/// RemapInstruction() on every instruction.
+inline void RemapFunction(Function &F, ValueToValueMapTy &VM,
+                          RemapFlags Flags = RF_None,
+                          ValueMapTypeRemapper *TypeMapper = nullptr,
+                          ValueMaterializer *Materializer = nullptr) {
+  ValueMapper(VM, Flags, TypeMapper, Materializer).remapFunction(F);
+}
+
+/// Version of MapValue with type safety for Constant.
+inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
+                          RemapFlags Flags = RF_None,
+                          ValueMapTypeRemapper *TypeMapper = nullptr,
+                          ValueMaterializer *Materializer = nullptr) {
+  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapConstant(*V);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
new file mode 100644
index 0000000..19845e4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize.h
@@ -0,0 +1,144 @@
+//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Vectorize transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_H
+
+namespace llvm {
+class BasicBlock;
+class BasicBlockPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize configuration.
+struct VectorizeConfig {
+  //===--------------------------------------------------------------------===//
+  // Target architecture related parameters
+
+  /// @brief The size of the native vector registers.
+  unsigned VectorBits;
+
+  /// @brief Vectorize boolean values.
+  bool VectorizeBools;
+
+  /// @brief Vectorize integer values.
+  bool VectorizeInts;
+
+  /// @brief Vectorize floating-point values.
+  bool VectorizeFloats;
+
+  /// @brief Vectorize pointer values.
+  bool VectorizePointers;
+
+  /// @brief Vectorize casting (conversion) operations.
+  bool VectorizeCasts;
+
+  /// @brief Vectorize floating-point math intrinsics.
+  bool VectorizeMath;
+
+  /// @brief Vectorize bit intrinsics.
+  bool VectorizeBitManipulations;
+
+  /// @brief Vectorize the fused-multiply-add intrinsic.
+  bool VectorizeFMA;
+
+  /// @brief Vectorize select instructions.
+  bool VectorizeSelect;
+
+  /// @brief Vectorize comparison instructions.
+  bool VectorizeCmp;
+
+  /// @brief Vectorize getelementptr instructions.
+  bool VectorizeGEP;
+
+  /// @brief Vectorize loads and stores.
+  bool VectorizeMemOps;
+
+  /// @brief Only generate aligned loads and stores.
+  bool AlignedOnly;
+
+  //===--------------------------------------------------------------------===//
+  // Misc parameters
+
+  /// @brief The required chain depth for vectorization.
+  unsigned ReqChainDepth;
+
+  /// @brief The maximum search distance for instruction pairs.
+  unsigned SearchLimit;
+
+  /// @brief The maximum number of candidate pairs with which to use a full
+  ///        cycle check.
+  unsigned MaxCandPairsForCycleCheck;
+
+  /// @brief Replicating one element to a pair breaks the chain.
+  bool SplatBreaksChain;
+
+  /// @brief The maximum number of pairable instructions per group.
+  unsigned MaxInsts;
+
+  /// @brief The maximum number of candidate instruction pairs per group.
+  unsigned MaxPairs;
+
+  /// @brief The maximum number of pairing iterations.
+  unsigned MaxIter;
+
+  /// @brief Don't try to form odd-length vectors.
+  bool Pow2LenOnly;
+
+  /// @brief Don't boost the chain-depth contribution of loads and stores.
+  bool NoMemOpBoost;
+
+  /// @brief Use a fast instruction dependency analysis.
+  bool FastDep;
+
+  /// @brief Initialize the VectorizeConfig from command line options.
+  VectorizeConfig();
+};
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass *createLoopVectorizePass(bool NoUnrolling = false,
+                              bool AlwaysVectorize = true);
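+//
+// Typical legacy pass-manager use (illustrative sketch; requires
+// llvm/IR/LegacyPassManager.h):
+//
+//   legacy::PassManager PM;
+//   PM.add(createLoopVectorizePass(/*NoUnrolling=*/false,
+//                                  /*AlwaysVectorize=*/true));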
+
+//===----------------------------------------------------------------------===//
+//
+// SLPVectorizer - Create a bottom-up SLP vectorizer pass.
+//
+Pass *createSLPVectorizerPass();
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize the BasicBlock.
+///
+/// @param BB The BasicBlock to be vectorized
+/// @param P  The current running pass; it should require AliasAnalysis and
+///           ScalarEvolution. After the vectorization, AliasAnalysis,
+///           ScalarEvolution and the CFG are preserved.
+///
+/// @return True if the BB is changed, false otherwise.
+///
+bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
+                         const VectorizeConfig &C = VectorizeConfig());
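+//
+// For example (illustrative), from within a pass that requires AliasAnalysis
+// and ScalarEvolution:
+//
+//   VectorizeConfig C;
+//   C.VectorBits = 256; // assumption: 256-bit native vector registers
+//   bool Changed = vectorizeBasicBlock(this, BB, C);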
+
+//===----------------------------------------------------------------------===//
+//
+// LoadStoreVectorizer - Create vector loads and stores, but leave scalar
+// operations.
+//
+Pass *createLoadStoreVectorizerPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
new file mode 100644
index 0000000..32b56d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -0,0 +1,107 @@
+//===- LoopVectorize.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
+// and generates target-independent LLVM-IR.
+// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
+// of instructions, and from those the profitability of vectorization.
+//
+// The loop vectorizer combines consecutive loop iterations into a single
+// 'wide' iteration. After this transformation the index is incremented
+// by the SIMD vector width, and not by one.
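+//
+// For example (illustrative IR), with a vector width of 4 a scalar body such
+// as
+//   %x = load i32, i32* %p
+//   %y = add i32 %x, 1
+// is widened to
+//   %xv = load <4 x i32>, <4 x i32>* %pv
+//   %yv = add <4 x i32> %xv, <i32 1, i32 1, i32 1, i32 1>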
+//
+// This pass has four parts:
+// 1. The main loop pass that drives the different parts.
+// 2. LoopVectorizationLegality - A unit that checks for the legality
+//    of the vectorization.
+// 3. InnerLoopVectorizer - A unit that performs the actual
+//    widening of instructions.
+// 4. LoopVectorizationCostModel - A unit that checks for the profitability
+//    of vectorization. It decides on the optimal vector width, which
+//    can be one, if vectorization is not profitable.
+//
+//===----------------------------------------------------------------------===//
+//
+// The reduction-variable vectorization is based on the paper:
+//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
+//
+// Variable uniformity checks are inspired by:
+//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
+//
+// The interleaved access vectorization is based on the paper:
+//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
+//  Data for SIMD
+//
+// Other ideas/concepts are from:
+//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
+//
+//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
+//  Vectorizing Compilers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/PassManager.h"
+#include <functional>
+
+namespace llvm {
+
+class AssumptionCache;
+class BlockFrequencyInfo;
+class DemandedBits;
+class DominatorTree;
+class Function;
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class ScalarEvolution;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+/// The LoopVectorize Pass.
+struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
+  bool DisableUnrolling = false;
+
+  /// If true, consider all loops for vectorization.
+  /// If false, only loops that explicitly request vectorization are
+  /// considered.
+  bool AlwaysVectorize = true;
+
+  ScalarEvolution *SE;
+  LoopInfo *LI;
+  TargetTransformInfo *TTI;
+  DominatorTree *DT;
+  BlockFrequencyInfo *BFI;
+  TargetLibraryInfo *TLI;
+  DemandedBits *DB;
+  AliasAnalysis *AA;
+  AssumptionCache *AC;
+  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
+  OptimizationRemarkEmitter *ORE;
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Shim for old PM.
+  bool runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
+               TargetTransformInfo &TTI_, DominatorTree &DT_,
+               BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
+               DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
+               std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
+               OptimizationRemarkEmitter &ORE);
+
+  bool processLoop(Loop *L);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
diff --git a/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
new file mode 100644
index 0000000..979d5ef
--- /dev/null
+++ b/linux-x64/clang/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -0,0 +1,154 @@
+//===- SLPVectorizer.h ------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
+// stores that can be put together into vector-stores. Next, it attempts to
+// construct a vectorizable tree using the use-def chains. If a profitable tree
+// was found, the SLP vectorizer performs vectorization on the tree.
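+//
+// For example (illustrative), four consecutive scalar stores
+//   a[i] = x0; a[i+1] = x1; a[i+2] = x2; a[i+3] = x3;
+// can become a single wide vector store when deemed profitable.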
+//
+// The pass is inspired by the work described in the paper:
+//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
+#define LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class CmpInst;
+class DataLayout;
+class DemandedBits;
+class DominatorTree;
+class Function;
+class InsertElementInst;
+class InsertValueInst;
+class Instruction;
+class LoopInfo;
+class OptimizationRemarkEmitter;
+class PHINode;
+class ScalarEvolution;
+class StoreInst;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class Value;
+
+/// A private "module" namespace for types and utilities used by this pass.
+/// These are implementation details and should not be used by clients.
+namespace slpvectorizer {
+
+class BoUpSLP;
+
+} // end namespace slpvectorizer
+
+struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
+  using StoreList = SmallVector<StoreInst *, 8>;
+  using StoreListMap = MapVector<Value *, StoreList>;
+  using WeakTrackingVHList = SmallVector<WeakTrackingVH, 8>;
+  using WeakTrackingVHListMap = MapVector<Value *, WeakTrackingVHList>;
+
+  ScalarEvolution *SE = nullptr;
+  TargetTransformInfo *TTI = nullptr;
+  TargetLibraryInfo *TLI = nullptr;
+  AliasAnalysis *AA = nullptr;
+  LoopInfo *LI = nullptr;
+  DominatorTree *DT = nullptr;
+  AssumptionCache *AC = nullptr;
+  DemandedBits *DB = nullptr;
+  const DataLayout *DL = nullptr;
+
+public:
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+  // Glue for old PM.
+  bool runImpl(Function &F, ScalarEvolution *SE_, TargetTransformInfo *TTI_,
+               TargetLibraryInfo *TLI_, AliasAnalysis *AA_, LoopInfo *LI_,
+               DominatorTree *DT_, AssumptionCache *AC_, DemandedBits *DB_,
+               OptimizationRemarkEmitter *ORE_);
+
+private:
+  /// \brief Collect store and getelementptr instructions and organize them
+  /// according to the underlying object of their pointer operands. We sort the
+  /// instructions by their underlying objects to reduce the cost of
+  /// consecutive access queries.
+  ///
+  /// TODO: We can further reduce this cost if we flush the chain creation
+  ///       every time we run into a memory barrier.
+  void collectSeedInstructions(BasicBlock *BB);
+
+  /// \brief Try to vectorize a chain that starts at two arithmetic
+  /// instructions.
+  bool tryToVectorizePair(Value *A, Value *B, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Try to vectorize a list of operands.
+  /// \param UserCost Cost of the user operations of \p VL if they may affect
+  /// the cost of the vectorization.
+  /// \returns true if a value was vectorized.
+  bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
+                          int UserCost = 0, bool AllowReorder = false);
+
+  /// \brief Try to vectorize a chain that may start at the operands of \p I.
+  bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Vectorize the store instructions collected in Stores.
+  bool vectorizeStoreChains(slpvectorizer::BoUpSLP &R);
+
+  /// \brief Vectorize the index computations of the getelementptr instructions
+  /// collected in GEPs.
+  bool vectorizeGEPIndices(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// Try to find horizontal reduction or otherwise vectorize a chain of binary
+  /// operators.
+  bool vectorizeRootInstruction(PHINode *P, Value *V, BasicBlock *BB,
+                                slpvectorizer::BoUpSLP &R,
+                                TargetTransformInfo *TTI);
+
+  /// Try to vectorize trees that start at insertvalue instructions.
+  bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
+                                slpvectorizer::BoUpSLP &R);
+
+  /// Try to vectorize trees that start at insertelement instructions.
+  bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
+                                  slpvectorizer::BoUpSLP &R);
+
+  /// Try to vectorize trees that start at compare instructions.
+  bool vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// Tries to vectorize constructs starting from CmpInst, InsertValueInst or
+  /// InsertElementInst instructions.
+  bool vectorizeSimpleInstructions(SmallVectorImpl<WeakVH> &Instructions,
+                                   BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  /// \brief Scan the basic block and look for patterns that are likely to start
+  /// a vectorization chain.
+  bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R);
+
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain, slpvectorizer::BoUpSLP &R,
+                           unsigned VecRegSize);
+
+  bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);
+
+  /// The store instructions in a basic block organized by base pointer.
+  StoreListMap Stores;
+
+  /// The getelementptr instructions in a basic block organized by base pointer.
+  WeakTrackingVHListMap GEPs;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H