Import prebuilt clang toolchain for Linux.
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h b/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h
new file mode 100644
index 0000000..7932688
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -0,0 +1,667 @@
+//===- ExecutionEngine.h - Abstract Execution Engine Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the abstract interface that implements execution support
+// for LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+#define LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+struct GenericValue;
+class GlobalValue;
+class GlobalVariable;
+class JITEventListener;
+class MCJITMemoryManager;
+class ObjectCache;
+class RTDyldMemoryManager;
+class Triple;
+class Type;
+
+namespace object {
+
+class Archive;
+class ObjectFile;
+
+} // end namespace object
+
+/// \brief Helper class for synchronizing access to the global address map
+/// table. Access to this class should be serialized under a mutex.
+class ExecutionEngineState {
+public:
+ using GlobalAddressMapTy = StringMap<uint64_t>;
+
+private:
+  /// GlobalAddressMap - A mapping between LLVM global symbol names and the
+  /// addresses at which they have been emitted.
+ GlobalAddressMapTy GlobalAddressMap;
+
+ /// GlobalAddressReverseMap - This is the reverse mapping of GlobalAddressMap,
+ /// used to convert raw addresses into the LLVM global value that is emitted
+ /// at the address. This map is not computed unless getGlobalValueAtAddress
+ /// is called at some point.
+ std::map<uint64_t, std::string> GlobalAddressReverseMap;
+
+public:
+ GlobalAddressMapTy &getGlobalAddressMap() {
+ return GlobalAddressMap;
+ }
+
+ std::map<uint64_t, std::string> &getGlobalAddressReverseMap() {
+ return GlobalAddressReverseMap;
+ }
+
+ /// \brief Erase an entry from the mapping table.
+ ///
+  /// \returns The address that \p Name was mapped to.
+ uint64_t RemoveMapping(StringRef Name);
+};
+
+using FunctionCreator = std::function<void *(const std::string &)>;
+
+/// \brief Abstract interface for the execution of LLVM modules,
+/// designed to support both interpreter and just-in-time (JIT) compiler
+/// implementations.
+class ExecutionEngine {
+ /// The state object holding the global address mapping, which must be
+ /// accessed synchronously.
+ //
+  // FIXME: There is no particular reason the entire map needs to be
+ // synchronized. Wouldn't a reader-writer design be better here?
+ ExecutionEngineState EEState;
+
+ /// The target data for the platform for which execution is being performed.
+ ///
+ /// Note: the DataLayout is LLVMContext specific because it has an
+  /// internal cache based on type pointers. This makes it unsafe to reuse the
+  /// ExecutionEngine across contexts; we don't enforce this rule, but
+  /// undefined behavior can occur if the user tries to do it.
+ const DataLayout DL;
+
+ /// Whether lazy JIT compilation is enabled.
+ bool CompilingLazily;
+
+ /// Whether JIT compilation of external global variables is allowed.
+ bool GVCompilationDisabled;
+
+ /// Whether the JIT should perform lookups of external symbols (e.g.,
+ /// using dlsym).
+ bool SymbolSearchingDisabled;
+
+ /// Whether the JIT should verify IR modules during compilation.
+ bool VerifyModules;
+
+ friend class EngineBuilder; // To allow access to JITCtor and InterpCtor.
+
+protected:
+ /// The list of Modules that we are JIT'ing from. We use a SmallVector to
+ /// optimize for the case where there is only one module.
+ SmallVector<std::unique_ptr<Module>, 1> Modules;
+
+  /// getMemoryForGV - Allocate memory for a global variable.
+ virtual char *getMemoryForGV(const GlobalVariable *GV);
+
+ static ExecutionEngine *(*MCJITCtor)(
+ std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MM,
+ std::shared_ptr<LegacyJITSymbolResolver> SR,
+ std::unique_ptr<TargetMachine> TM);
+
+ static ExecutionEngine *(*OrcMCJITReplacementCtor)(
+ std::string *ErrorStr, std::shared_ptr<MCJITMemoryManager> MM,
+ std::shared_ptr<LegacyJITSymbolResolver> SR,
+ std::unique_ptr<TargetMachine> TM);
+
+ static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
+ std::string *ErrorStr);
+
+ /// LazyFunctionCreator - If an unknown function is needed, this function
+ /// pointer is invoked to create it. If this returns null, the JIT will
+ /// abort.
+ FunctionCreator LazyFunctionCreator;
+
+ /// getMangledName - Get mangled name.
+ std::string getMangledName(const GlobalValue *GV);
+
+public:
+ /// lock - This lock protects the ExecutionEngine and MCJIT classes. It must
+ /// be held while changing the internal state of any of those classes.
+ sys::Mutex lock;
+
+ //===--------------------------------------------------------------------===//
+ // ExecutionEngine Startup
+ //===--------------------------------------------------------------------===//
+
+ virtual ~ExecutionEngine();
+
+ /// Add a Module to the list of modules that we can JIT from.
+ virtual void addModule(std::unique_ptr<Module> M) {
+ Modules.push_back(std::move(M));
+ }
+
+ /// addObjectFile - Add an ObjectFile to the execution engine.
+ ///
+ /// This method is only supported by MCJIT. MCJIT will immediately load the
+  /// object into memory and add its symbols to the list used to resolve
+ /// external symbols while preparing other objects for execution.
+ ///
+ /// Objects added using this function will not be made executable until
+ /// needed by another object.
+ ///
+ /// MCJIT will take ownership of the ObjectFile.
+ virtual void addObjectFile(std::unique_ptr<object::ObjectFile> O);
+ virtual void addObjectFile(object::OwningBinary<object::ObjectFile> O);
+
+ /// addArchive - Add an Archive to the execution engine.
+ ///
+ /// This method is only supported by MCJIT. MCJIT will use the archive to
+ /// resolve external symbols in objects it is loading. If a symbol is found
+ /// in the Archive the contained object file will be extracted (in memory)
+ /// and loaded for possible execution.
+ virtual void addArchive(object::OwningBinary<object::Archive> A);
+
+ //===--------------------------------------------------------------------===//
+
+ const DataLayout &getDataLayout() const { return DL; }
+
+ /// removeModule - Removes a Module from the list of modules, but does not
+ /// free the module's memory. Returns true if M is found, in which case the
+ /// caller assumes responsibility for deleting the module.
+ //
+ // FIXME: This stealth ownership transfer is horrible. This will probably be
+ // fixed by deleting ExecutionEngine.
+ virtual bool removeModule(Module *M);
+
+  /// FindFunctionNamed - Search all of the active modules to find the
+  /// function that defines FnName. This is a very slow operation and
+  /// shouldn't be used for general code.
+ virtual Function *FindFunctionNamed(StringRef FnName);
+
+  /// FindGlobalVariableNamed - Search all of the active modules to find the
+  /// global variable that defines Name. This is a very slow operation and
+  /// shouldn't be used for general code.
+ virtual GlobalVariable *FindGlobalVariableNamed(StringRef Name, bool AllowInternal = false);
+
+ /// runFunction - Execute the specified function with the specified arguments,
+ /// and return the result.
+ ///
+ /// For MCJIT execution engines, clients are encouraged to use the
+ /// "GetFunctionAddress" method (rather than runFunction) and cast the
+ /// returned uint64_t to the desired function pointer type. However, for
+ /// backwards compatibility MCJIT's implementation can execute 'main-like'
+  /// functions (i.e. those returning void or int, and taking either no
+ /// arguments or (int, char*[])).
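+  ///
+  /// For example (illustrative only; assumes an MCJIT-backed engine EE whose
+  /// module defines "int add(int, int)"):
+  /// \code
+  ///   EE->finalizeObject();
+  ///   auto *Add = reinterpret_cast<int (*)(int, int)>(
+  ///       EE->getFunctionAddress("add"));
+  ///   int Result = Add(1, 2);
+  /// \endcode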
+ virtual GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) = 0;
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ /// This function is deprecated for the MCJIT execution engine.
+ virtual void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) = 0;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ virtual void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
+ llvm_unreachable("Re-mapping of section addresses not supported with this "
+ "EE!");
+ }
+
+ /// generateCodeForModule - Run code generation for the specified module and
+ /// load it into memory.
+ ///
+ /// When this function has completed, all code and data for the specified
+ /// module, and any module on which this module depends, will be generated
+ /// and loaded into memory, but relocations will not yet have been applied
+ /// and all memory will be readable and writable but not executable.
+ ///
+ /// This function is primarily useful when generating code for an external
+ /// target, allowing the client an opportunity to remap section addresses
+ /// before relocations are applied. Clients that intend to execute code
+ /// locally can use the getFunctionAddress call, which will generate code
+ /// and apply final preparations all in one step.
+ ///
+  /// This method has no effect for the interpreter.
+ virtual void generateCodeForModule(Module *M) {}
+
+ /// finalizeObject - ensure the module is fully processed and is usable.
+ ///
+ /// It is the user-level function for completing the process of making the
+ /// object usable for execution. It should be called after sections within an
+ /// object have been relocated using mapSectionAddress. When this method is
+ /// called the MCJIT execution engine will reapply relocations for a loaded
+  /// object. This method has no effect for the interpreter.
+ virtual void finalizeObject() {}
+
+ /// runStaticConstructorsDestructors - This method is used to execute all of
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ virtual void runStaticConstructorsDestructors(bool isDtors);
+
+ /// This method is used to execute all of the static constructors or
+ /// destructors for a particular module.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ void runStaticConstructorsDestructors(Module &module, bool isDtors);
+
+
+ /// runFunctionAsMain - This is a helper function which wraps runFunction to
+ /// handle the common task of starting up main with the specified argc, argv,
+ /// and envp parameters.
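+  ///
+  /// For example (illustrative; EE, MainFn and envp are placeholders provided
+  /// by the caller):
+  /// \code
+  ///   std::vector<std::string> Args = {"prog", "--flag"};
+  ///   int RC = EE->runFunctionAsMain(MainFn, Args, envp);
+  /// \endcode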
+ int runFunctionAsMain(Function *Fn, const std::vector<std::string> &argv,
+ const char * const * envp);
+
+
+ /// addGlobalMapping - Tell the execution engine that the specified global is
+ /// at the specified location. This is used internally as functions are JIT'd
+ /// and as global variables are laid out in memory. It can and should also be
+ /// used by clients of the EE that want to have an LLVM global overlay
+ /// existing data in memory. Values to be mapped should be named, and have
+ /// external or weak linkage. Mappings are automatically removed when their
+ /// GlobalValue is destroyed.
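+  ///
+  /// For example (illustrative), to resolve an external declaration in the
+  /// module to a function that already exists in the host process:
+  /// \code
+  ///   EE->addGlobalMapping(HookDecl, reinterpret_cast<void *>(&HostHook));
+  /// \endcode
+  /// Here HookDecl is the module's GlobalValue for the declaration and
+  /// HostHook is a host function; both names are placeholders.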
+ void addGlobalMapping(const GlobalValue *GV, void *Addr);
+ void addGlobalMapping(StringRef Name, uint64_t Addr);
+
+ /// clearAllGlobalMappings - Clear all global mappings and start over again,
+ /// for use in dynamic compilation scenarios to move globals.
+ void clearAllGlobalMappings();
+
+ /// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+ /// particular module, because it has been removed from the JIT.
+ void clearGlobalMappingsFromModule(Module *M);
+
+ /// updateGlobalMapping - Replace an existing mapping for GV with a new
+ /// address. This updates both maps as required. If "Addr" is null, the
+ /// entry for the global is removed from the mappings. This returns the old
+ /// value of the pointer, or null if it was not in the map.
+ uint64_t updateGlobalMapping(const GlobalValue *GV, void *Addr);
+ uint64_t updateGlobalMapping(StringRef Name, uint64_t Addr);
+
+ /// getAddressToGlobalIfAvailable - This returns the address of the specified
+ /// global symbol.
+ uint64_t getAddressToGlobalIfAvailable(StringRef S);
+
+ /// getPointerToGlobalIfAvailable - This returns the address of the specified
+  /// global value if it has already been codegen'd, otherwise it returns
+ /// null.
+ void *getPointerToGlobalIfAvailable(StringRef S);
+ void *getPointerToGlobalIfAvailable(const GlobalValue *GV);
+
+ /// getPointerToGlobal - This returns the address of the specified global
+ /// value. This may involve code generation if it's a function.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getGlobalValueAddress instead.
+ void *getPointerToGlobal(const GlobalValue *GV);
+
+ /// getPointerToFunction - The different EE's represent function bodies in
+ /// different ways. They should each implement this to say what a function
+ /// pointer should look like. When F is destroyed, the ExecutionEngine will
+ /// remove its global mapping and free any machine code. Be sure no threads
+ /// are running inside F when that happens.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getFunctionAddress instead.
+ virtual void *getPointerToFunction(Function *F) = 0;
+
+ /// getPointerToFunctionOrStub - If the specified function has been
+ /// code-gen'd, return a pointer to the function. If not, compile it, or use
+ /// a stub to implement lazy compilation if available. See
+ /// getPointerToFunction for the requirements on destroying F.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getFunctionAddress instead.
+ virtual void *getPointerToFunctionOrStub(Function *F) {
+ // Default implementation, just codegen the function.
+ return getPointerToFunction(F);
+ }
+
+ /// getGlobalValueAddress - Return the address of the specified global
+ /// value. This may involve code generation.
+ ///
+ /// This function should not be called with the interpreter engine.
+ virtual uint64_t getGlobalValueAddress(const std::string &Name) {
+ // Default implementation for the interpreter. MCJIT will override this.
+ // JIT and interpreter clients should use getPointerToGlobal instead.
+ return 0;
+ }
+
+ /// getFunctionAddress - Return the address of the specified function.
+ /// This may involve code generation.
+ virtual uint64_t getFunctionAddress(const std::string &Name) {
+ // Default implementation for the interpreter. MCJIT will override this.
+ // Interpreter clients should use getPointerToFunction instead.
+ return 0;
+ }
+
+ /// getGlobalValueAtAddress - Return the LLVM global value object that starts
+ /// at the specified address.
+ ///
+ const GlobalValue *getGlobalValueAtAddress(void *Addr);
+
+ /// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr.
+ /// Ptr is the address of the memory at which to store Val, cast to
+ /// GenericValue *. It is not a pointer to a GenericValue containing the
+ /// address at which to store Val.
+ void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
+ Type *Ty);
+
+ void InitializeMemory(const Constant *Init, void *Addr);
+
+ /// getOrEmitGlobalVariable - Return the address of the specified global
+ /// variable, possibly emitting it to memory if needed. This is used by the
+ /// Emitter.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getGlobalValueAddress instead.
+ virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
+ return getPointerToGlobal((const GlobalValue *)GV);
+ }
+
+ /// Registers a listener to be called back on various events within
+ /// the JIT. See JITEventListener.h for more details. Does not
+ /// take ownership of the argument. The argument may be NULL, in
+ /// which case these functions do nothing.
+ virtual void RegisterJITEventListener(JITEventListener *) {}
+ virtual void UnregisterJITEventListener(JITEventListener *) {}
+
+ /// Sets the pre-compiled object cache. The ownership of the ObjectCache is
+ /// not changed. Supported by MCJIT but not the interpreter.
+ virtual void setObjectCache(ObjectCache *) {
+ llvm_unreachable("No support for an object cache");
+ }
+
+ /// setProcessAllSections (MCJIT Only): By default, only sections that are
+ /// "required for execution" are passed to the RTDyldMemoryManager, and other
+ /// sections are discarded. Passing 'true' to this method will cause
+ /// RuntimeDyld to pass all sections to its RTDyldMemoryManager regardless
+ /// of whether they are "required to execute" in the usual sense.
+ ///
+ /// Rationale: Some MCJIT clients want to be able to inspect metadata
+ /// sections (e.g. Dwarf, Stack-maps) to enable functionality or analyze
+ /// performance. Passing these sections to the memory manager allows the
+ /// client to make policy about the relevant sections, rather than having
+ /// MCJIT do it.
+ virtual void setProcessAllSections(bool ProcessAllSections) {
+ llvm_unreachable("No support for ProcessAllSections option");
+ }
+
+ /// Return the target machine (if available).
+ virtual TargetMachine *getTargetMachine() { return nullptr; }
+
+ /// DisableLazyCompilation - When lazy compilation is off (the default), the
+ /// JIT will eagerly compile every function reachable from the argument to
+ /// getPointerToFunction. If lazy compilation is turned on, the JIT will only
+ /// compile the one function and emit stubs to compile the rest when they're
+ /// first called. If lazy compilation is turned off again while some lazy
+ /// stubs are still around, and one of those stubs is called, the program will
+ /// abort.
+ ///
+ /// In order to safely compile lazily in a threaded program, the user must
+ /// ensure that 1) only one thread at a time can call any particular lazy
+ /// stub, and 2) any thread modifying LLVM IR must hold the JIT's lock
+ /// (ExecutionEngine::lock) or otherwise ensure that no other thread calls a
+ /// lazy stub. See http://llvm.org/PR5184 for details.
+ void DisableLazyCompilation(bool Disabled = true) {
+ CompilingLazily = !Disabled;
+ }
+ bool isCompilingLazily() const {
+ return CompilingLazily;
+ }
+
+ /// DisableGVCompilation - If called, the JIT will abort if it's asked to
+ /// allocate space and populate a GlobalVariable that is not internal to
+ /// the module.
+ void DisableGVCompilation(bool Disabled = true) {
+ GVCompilationDisabled = Disabled;
+ }
+ bool isGVCompilationDisabled() const {
+ return GVCompilationDisabled;
+ }
+
+  /// DisableSymbolSearching - If called, the JIT will not try to look up unknown
+ /// symbols with dlsym. A client can still use InstallLazyFunctionCreator to
+ /// resolve symbols in a custom way.
+ void DisableSymbolSearching(bool Disabled = true) {
+ SymbolSearchingDisabled = Disabled;
+ }
+ bool isSymbolSearchingDisabled() const {
+ return SymbolSearchingDisabled;
+ }
+
+ /// Enable/Disable IR module verification.
+ ///
+ /// Note: Module verification is enabled by default in Debug builds, and
+ /// disabled by default in Release. Use this method to override the default.
+ void setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ }
+ bool getVerifyModules() const {
+ return VerifyModules;
+ }
+
+ /// InstallLazyFunctionCreator - If an unknown function is needed, the
+ /// specified function pointer is invoked to create it. If it returns null,
+ /// the JIT will abort.
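+  ///
+  /// For example (illustrative), unknown names can be routed to a
+  /// caller-maintained table (ExternalSymbols is a placeholder):
+  /// \code
+  ///   EE->InstallLazyFunctionCreator([&](const std::string &Name) -> void * {
+  ///     auto I = ExternalSymbols.find(Name);
+  ///     return I == ExternalSymbols.end() ? nullptr : I->second;
+  ///   });
+  /// \endcode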
+ void InstallLazyFunctionCreator(FunctionCreator C) {
+ LazyFunctionCreator = std::move(C);
+ }
+
+protected:
+ ExecutionEngine(DataLayout DL) : DL(std::move(DL)) {}
+ explicit ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M);
+ explicit ExecutionEngine(std::unique_ptr<Module> M);
+
+ void emitGlobals();
+
+ void EmitGlobalVariable(const GlobalVariable *GV);
+
+ GenericValue getConstantValue(const Constant *C);
+ void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
+ Type *Ty);
+
+private:
+ void Init(std::unique_ptr<Module> M);
+};
+
+namespace EngineKind {
+
+ // These are actually bitmasks that get or-ed together.
+ enum Kind {
+ JIT = 0x1,
+ Interpreter = 0x2
+ };
+ const static Kind Either = (Kind)(JIT | Interpreter);
+
+} // end namespace EngineKind
+
+/// Builder class for ExecutionEngines. Use this by stack-allocating a builder,
+/// chaining the various set* methods, and terminating it with a .create()
+/// call.
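+///
+/// For example (illustrative; assumes the caller owns a std::unique_ptr<Module>
+/// M and an error string Err):
+/// \code
+///   ExecutionEngine *EE = EngineBuilder(std::move(M))
+///                             .setEngineKind(EngineKind::JIT)
+///                             .setErrorStr(&Err)
+///                             .setOptLevel(CodeGenOpt::Default)
+///                             .create();
+/// \endcode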
+class EngineBuilder {
+private:
+ std::unique_ptr<Module> M;
+ EngineKind::Kind WhichEngine;
+ std::string *ErrorStr;
+ CodeGenOpt::Level OptLevel;
+ std::shared_ptr<MCJITMemoryManager> MemMgr;
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver;
+ TargetOptions Options;
+ Optional<Reloc::Model> RelocModel;
+ Optional<CodeModel::Model> CMModel;
+ std::string MArch;
+ std::string MCPU;
+ SmallVector<std::string, 4> MAttrs;
+ bool VerifyModules;
+ bool UseOrcMCJITReplacement;
+ bool EmulatedTLS = true;
+
+public:
+ /// Default constructor for EngineBuilder.
+ EngineBuilder();
+
+ /// Constructor for EngineBuilder.
+ EngineBuilder(std::unique_ptr<Module> M);
+
+ // Out-of-line since we don't have the def'n of RTDyldMemoryManager here.
+ ~EngineBuilder();
+
+ /// setEngineKind - Controls whether the user wants the interpreter, the JIT,
+ /// or whichever engine works. This option defaults to EngineKind::Either.
+ EngineBuilder &setEngineKind(EngineKind::Kind w) {
+ WhichEngine = w;
+ return *this;
+ }
+
+ /// setMCJITMemoryManager - Sets the MCJIT memory manager to use. This allows
+ /// clients to customize their memory allocation policies for the MCJIT. This
+ /// is only appropriate for the MCJIT; setting this and configuring the builder
+ /// to create anything other than MCJIT will cause a runtime error. If create()
+ /// is called and is successful, the created engine takes ownership of the
+ /// memory manager. This option defaults to NULL.
+ EngineBuilder &setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);
+
+ EngineBuilder&
+ setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
+
+ EngineBuilder &setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR);
+
+ /// setErrorStr - Set the error string to write to on error. This option
+ /// defaults to NULL.
+ EngineBuilder &setErrorStr(std::string *e) {
+ ErrorStr = e;
+ return *this;
+ }
+
+ /// setOptLevel - Set the optimization level for the JIT. This option
+ /// defaults to CodeGenOpt::Default.
+ EngineBuilder &setOptLevel(CodeGenOpt::Level l) {
+ OptLevel = l;
+ return *this;
+ }
+
+ /// setTargetOptions - Set the target options that the ExecutionEngine
+ /// target is using. Defaults to TargetOptions().
+ EngineBuilder &setTargetOptions(const TargetOptions &Opts) {
+ Options = Opts;
+ return *this;
+ }
+
+ /// setRelocationModel - Set the relocation model that the ExecutionEngine
+ /// target is using. Defaults to target specific default "Reloc::Default".
+ EngineBuilder &setRelocationModel(Reloc::Model RM) {
+ RelocModel = RM;
+ return *this;
+ }
+
+ /// setCodeModel - Set the CodeModel that the ExecutionEngine target
+ /// data is using. Defaults to target specific default
+ /// "CodeModel::JITDefault".
+ EngineBuilder &setCodeModel(CodeModel::Model M) {
+ CMModel = M;
+ return *this;
+ }
+
+ /// setMArch - Override the architecture set by the Module's triple.
+ EngineBuilder &setMArch(StringRef march) {
+ MArch.assign(march.begin(), march.end());
+ return *this;
+ }
+
+ /// setMCPU - Target a specific cpu type.
+ EngineBuilder &setMCPU(StringRef mcpu) {
+ MCPU.assign(mcpu.begin(), mcpu.end());
+ return *this;
+ }
+
+ /// setVerifyModules - Set whether the JIT implementation should verify
+ /// IR modules during compilation.
+ EngineBuilder &setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ return *this;
+ }
+
+ /// setMAttrs - Set cpu-specific attributes.
+ template<typename StringSequence>
+ EngineBuilder &setMAttrs(const StringSequence &mattrs) {
+ MAttrs.clear();
+ MAttrs.append(mattrs.begin(), mattrs.end());
+ return *this;
+ }
+
+  /// \brief Use OrcMCJITReplacement instead of MCJIT. Off by default.
+ void setUseOrcMCJITReplacement(bool UseOrcMCJITReplacement) {
+ this->UseOrcMCJITReplacement = UseOrcMCJITReplacement;
+ }
+
+ void setEmulatedTLS(bool EmulatedTLS) {
+ this->EmulatedTLS = EmulatedTLS;
+ }
+
+ TargetMachine *selectTarget();
+
+ /// selectTarget - Pick a target either via -march or by guessing the native
+ /// arch. Add any CPU features specified via -mcpu or -mattr.
+ TargetMachine *selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
+
+ ExecutionEngine *create() {
+ return create(selectTarget());
+ }
+
+ ExecutionEngine *create(TargetMachine *TM);
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionEngine, LLVMExecutionEngineRef)
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h b/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h
new file mode 100644
index 0000000..504e30a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/GenericValue.h
@@ -0,0 +1,55 @@
+//===- GenericValue.h - Represent any type of LLVM value --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The GenericValue class is used to represent an LLVM value of arbitrary type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_GENERICVALUE_H
+#define LLVM_EXECUTIONENGINE_GENERICVALUE_H
+
+#include "llvm/ADT/APInt.h"
+#include <vector>
+
+namespace llvm {
+
+using PointerTy = void *;
+
+struct GenericValue {
+ struct IntPair {
+ unsigned int first;
+ unsigned int second;
+ };
+ union {
+ double DoubleVal;
+ float FloatVal;
+ PointerTy PointerVal;
+ struct IntPair UIntPairVal;
+ unsigned char Untyped[8];
+ };
+ APInt IntVal; // also used for long doubles.
+ // For aggregate data types.
+ std::vector<GenericValue> AggregateVal;
+
+  // To make code faster, zero-initializing the GenericValue could be omitted,
+  // but that can potentially cause problems, since the GenericValue would
+  // then store garbage instead of zero.
+ GenericValue() : IntVal(1, 0) {
+ UIntPairVal.first = 0;
+ UIntPairVal.second = 0;
+ }
+ explicit GenericValue(void *V) : PointerVal(V), IntVal(1, 0) {}
+};
+
+inline GenericValue PTOGV(void *P) { return GenericValue(P); }
+inline void *GVTOP(const GenericValue &GV) { return GV.PointerVal; }
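+
+// For example (illustrative; EE and F are placeholders for an interpreter
+// ExecutionEngine and a Function*): passing a 32-bit integer argument to
+// ExecutionEngine::runFunction.
+//
+//   GenericValue Arg;
+//   Arg.IntVal = APInt(32, 42);
+//   GenericValue Result = EE->runFunction(F, {Arg});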
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_GENERICVALUE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h b/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h
new file mode 100644
index 0000000..a147078
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Interpreter.h
@@ -0,0 +1,28 @@
+//===-- Interpreter.h - Abstract Execution Engine Interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the interpreter to link in on certain operating systems
+// (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_INTERPRETER_H
+#define LLVM_EXECUTIONENGINE_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+
+extern "C" void LLVMLinkInInterpreter();
+
+namespace {
+ struct ForceInterpreterLinking {
+ ForceInterpreterLinking() { LLVMLinkInInterpreter(); }
+ } ForceInterpreterLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h b/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h
new file mode 100644
index 0000000..ff7840f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/JITEventListener.h
@@ -0,0 +1,124 @@
+//===- JITEventListener.h - Exposes events from JIT compilation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the JITEventListener interface, which lets users get
+// callbacks when significant events happen during the JIT compilation process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+#define LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/DebugLoc.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+
+class IntelJITEventsWrapper;
+class MachineFunction;
+class OProfileWrapper;
+
+namespace object {
+
+class ObjectFile;
+
+} // end namespace object
+
+/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
+/// about a generated machine code function.
+struct JITEvent_EmittedFunctionDetails {
+ struct LineStart {
+ /// The address at which the current line changes.
+ uintptr_t Address;
+
+ /// The new location information. These can be translated to DebugLocTuples
+ /// using MF->getDebugLocTuple().
+ DebugLoc Loc;
+ };
+
+ /// The machine function the struct contains information for.
+ const MachineFunction *MF;
+
+ /// The list of line boundary information, sorted by address.
+ std::vector<LineStart> LineStarts;
+};
+
+/// JITEventListener - Abstract interface for use by the JIT to notify clients
+/// about significant events during compilation, such as notifying
+/// profilers and debuggers that need to know where functions have been emitted.
+///
+/// The default implementation of each method does nothing.
+class JITEventListener {
+public:
+ using EmittedFunctionDetails = JITEvent_EmittedFunctionDetails;
+
+public:
+ JITEventListener() = default;
+ virtual ~JITEventListener() = default;
+
+ /// NotifyObjectEmitted - Called after an object has been successfully
+ /// emitted to memory. NotifyFunctionEmitted will not be called for
+ /// individual functions in the object.
+ ///
+  /// ELF-specific information:
+ /// The ObjectImage contains the generated object image
+ /// with section headers updated to reflect the address at which sections
+ /// were loaded and with relocations performed in-place on debug sections.
+ virtual void NotifyObjectEmitted(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {}
+
+ /// NotifyFreeingObject - Called just before the memory associated with
+ /// a previously emitted object is released.
+ virtual void NotifyFreeingObject(const object::ObjectFile &Obj) {}
+
+  // Get a pointer to the GDB debugger registration listener.
+ static JITEventListener *createGDBRegistrationListener();
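+
+  // For example (illustrative; EE is a placeholder ExecutionEngine*), an
+  // MCJIT client can enable GDB JIT registration with:
+  //
+  //   EE->RegisterJITEventListener(
+  //       JITEventListener::createGDBRegistrationListener());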
+
+#if LLVM_USE_INTEL_JITEVENTS
+ // Construct an IntelJITEventListener
+ static JITEventListener *createIntelJITEventListener();
+
+ // Construct an IntelJITEventListener with a test Intel JIT API implementation
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl);
+#else
+ static JITEventListener *createIntelJITEventListener() { return nullptr; }
+
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl) {
+ return nullptr;
+ }
+#endif // LLVM_USE_INTEL_JITEVENTS
+
+#if LLVM_USE_OPROFILE
+ // Construct an OProfileJITEventListener
+ static JITEventListener *createOProfileJITEventListener();
+
+ // Construct an OProfileJITEventListener with a test opagent implementation
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl);
+#else
+ static JITEventListener *createOProfileJITEventListener() { return nullptr; }
+
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl) {
+ return nullptr;
+ }
+#endif // LLVM_USE_OPROFILE
+
+private:
+ virtual void anchor();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h b/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h
new file mode 100644
index 0000000..86ab173
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/JITSymbol.h
@@ -0,0 +1,344 @@
+//===- JITSymbol.h - JIT symbol abstraction ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstraction for target process addresses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITSYMBOL_H
+#define LLVM_EXECUTIONENGINE_JITSYMBOL_H
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <set>
+#include <string>
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+class GlobalValue;
+
+namespace object {
+
+class BasicSymbolRef;
+
+} // end namespace object
+
+/// @brief Represents an address in the target process's address space.
+using JITTargetAddress = uint64_t;
+
+/// @brief Flags for symbols in the JIT.
+class JITSymbolFlags {
+public:
+ using UnderlyingType = uint8_t;
+ using TargetFlagsType = uint64_t;
+
+ enum FlagNames : UnderlyingType {
+ None = 0,
+ HasError = 1U << 0,
+ Weak = 1U << 1,
+ Common = 1U << 2,
+ Absolute = 1U << 3,
+ Exported = 1U << 4,
+ NotMaterialized = 1U << 5
+ };
+
+ static JITSymbolFlags stripTransientFlags(JITSymbolFlags Orig) {
+ return static_cast<FlagNames>(Orig.Flags & ~NotMaterialized);
+ }
+
+ /// @brief Default-construct a JITSymbolFlags instance.
+ JITSymbolFlags() = default;
+
+ /// @brief Construct a JITSymbolFlags instance from the given flags.
+ JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}
+
+ /// @brief Construct a JITSymbolFlags instance from the given flags and target
+ /// flags.
+ JITSymbolFlags(FlagNames Flags, TargetFlagsType TargetFlags)
+ : Flags(Flags), TargetFlags(TargetFlags) {}
+
+ /// @brief Return true if there was an error retrieving this symbol.
+ bool hasError() const {
+ return (Flags & HasError) == HasError;
+ }
+
+ /// @brief Returns true if this symbol has been fully materialized (i.e. is
+ /// callable).
+ bool isMaterialized() const { return !(Flags & NotMaterialized); }
+
+ /// @brief Returns true if the Weak flag is set.
+ bool isWeak() const {
+ return (Flags & Weak) == Weak;
+ }
+
+ /// @brief Returns true if the Common flag is set.
+ bool isCommon() const {
+ return (Flags & Common) == Common;
+ }
+
+ /// @brief Returns true if the symbol isn't weak or common.
+ bool isStrong() const {
+ return !isWeak() && !isCommon();
+ }
+
+ /// @brief Returns true if the Exported flag is set.
+ bool isExported() const {
+ return (Flags & Exported) == Exported;
+ }
+
+ /// @brief Implicitly convert to the underlying flags type.
+ operator UnderlyingType&() { return Flags; }
+
+ /// @brief Implicitly convert to the underlying flags type.
+ operator const UnderlyingType&() const { return Flags; }
+
+ /// @brief Return a reference to the target-specific flags.
+ TargetFlagsType& getTargetFlags() { return TargetFlags; }
+
+ /// @brief Return a reference to the target-specific flags.
+ const TargetFlagsType& getTargetFlags() const { return TargetFlags; }
+
+ /// Construct a JITSymbolFlags value based on the flags of the given global
+ /// value.
+ static JITSymbolFlags fromGlobalValue(const GlobalValue &GV);
+
+ /// Construct a JITSymbolFlags value based on the flags of the given libobject
+ /// symbol.
+ static JITSymbolFlags fromObjectSymbol(const object::BasicSymbolRef &Symbol);
+
+private:
+ UnderlyingType Flags = None;
+ TargetFlagsType TargetFlags = 0;
+};
+
+/// @brief ARM-specific JIT symbol flags.
+/// FIXME: This should be moved into a target-specific header.
+class ARMJITSymbolFlags {
+public:
+ ARMJITSymbolFlags() = default;
+
+ enum FlagNames {
+ None = 0,
+ Thumb = 1 << 0
+ };
+
+ operator JITSymbolFlags::TargetFlagsType&() { return Flags; }
+
+ static ARMJITSymbolFlags fromObjectSymbol(
+ const object::BasicSymbolRef &Symbol);
+private:
+ JITSymbolFlags::TargetFlagsType Flags = 0;
+};
+
+/// @brief Represents a symbol that has been evaluated to an address already.
+class JITEvaluatedSymbol {
+public:
+ JITEvaluatedSymbol() = default;
+
+ /// @brief Create a 'null' symbol.
+ JITEvaluatedSymbol(std::nullptr_t) {}
+
+ /// @brief Create a symbol for the given address and flags.
+ JITEvaluatedSymbol(JITTargetAddress Address, JITSymbolFlags Flags)
+ : Address(Address), Flags(Flags) {}
+
+ /// @brief An evaluated symbol converts to 'true' if its address is non-zero.
+ explicit operator bool() const { return Address != 0; }
+
+ /// @brief Return the address of this symbol.
+ JITTargetAddress getAddress() const { return Address; }
+
+ /// @brief Return the flags for this symbol.
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ JITTargetAddress Address = 0;
+ JITSymbolFlags Flags;
+};
+
+/// @brief Represents a symbol in the JIT.
+class JITSymbol {
+public:
+ using GetAddressFtor = std::function<Expected<JITTargetAddress>()>;
+
+ /// @brief Create a 'null' symbol, used to represent a "symbol not found"
+ /// result from a successful (non-erroneous) lookup.
+ JITSymbol(std::nullptr_t)
+ : CachedAddr(0) {}
+
+ /// @brief Create a JITSymbol representing an error in the symbol lookup
+ /// process (e.g. a network failure during a remote lookup).
+ JITSymbol(Error Err)
+ : Err(std::move(Err)), Flags(JITSymbolFlags::HasError) {}
+
+ /// @brief Create a symbol for a definition with a known address.
+ JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
+ : CachedAddr(Addr), Flags(Flags) {}
+
+ /// @brief Construct a JITSymbol from a JITEvaluatedSymbol.
+ JITSymbol(JITEvaluatedSymbol Sym)
+ : CachedAddr(Sym.getAddress()), Flags(Sym.getFlags()) {}
+
+ /// @brief Create a symbol for a definition that doesn't have a known address
+ /// yet.
+ /// @param GetAddress A functor to materialize a definition (fixing the
+ /// address) on demand.
+ ///
+ /// This constructor allows a JIT layer to provide a reference to a symbol
+ /// definition without actually materializing the definition up front. The
+ /// user can materialize the definition at any time by calling the getAddress
+ /// method.
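+  ///
+  /// For example (illustrative; compileAndGetAddress is a hypothetical helper
+  /// owned by the layer, and Name is assumed to be captured by the caller):
+  /// \code
+  ///   JITSymbol Sym(
+  ///       [this, Name]() -> Expected<JITTargetAddress> {
+  ///         return compileAndGetAddress(Name);
+  ///       },
+  ///       JITSymbolFlags::Exported);
+  /// \endcode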
+ JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
+ : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}
+
+ JITSymbol(const JITSymbol&) = delete;
+ JITSymbol& operator=(const JITSymbol&) = delete;
+
+ JITSymbol(JITSymbol &&Other)
+ : GetAddress(std::move(Other.GetAddress)), Flags(std::move(Other.Flags)) {
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ }
+
+ JITSymbol& operator=(JITSymbol &&Other) {
+ GetAddress = std::move(Other.GetAddress);
+ Flags = std::move(Other.Flags);
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ return *this;
+ }
+
+ ~JITSymbol() {
+ if (Flags.hasError())
+ Err.~Error();
+ else
+ CachedAddr.~JITTargetAddress();
+ }
+
+ /// @brief Returns true if the symbol exists, false otherwise.
+ explicit operator bool() const {
+ return !Flags.hasError() && (CachedAddr || GetAddress);
+ }
+
+ /// @brief Move the error field value out of this JITSymbol.
+ Error takeError() {
+ if (Flags.hasError())
+ return std::move(Err);
+ return Error::success();
+ }
+
+ /// @brief Get the address of the symbol in the target address space. Returns
+ /// '0' if the symbol does not exist.
+ Expected<JITTargetAddress> getAddress() {
+ assert(!Flags.hasError() && "getAddress called on error value");
+ if (GetAddress) {
+ if (auto CachedAddrOrErr = GetAddress()) {
+ GetAddress = nullptr;
+ CachedAddr = *CachedAddrOrErr;
+ assert(CachedAddr && "Symbol could not be materialized.");
+ } else
+ return CachedAddrOrErr.takeError();
+ }
+ return CachedAddr;
+ }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ GetAddressFtor GetAddress;
+ union {
+ JITTargetAddress CachedAddr;
+ Error Err;
+ };
+ JITSymbolFlags Flags;
+};
+
+/// @brief Symbol resolution interface.
+///
+/// Allows symbol flags and addresses to be looked up by name.
+/// Symbol queries are done in bulk (i.e. you request resolution of a set of
+/// symbols, rather than a single one) to reduce IPC overhead in the case of
+/// remote JITing, and expose opportunities for parallel compilation.
+class JITSymbolResolver {
+public:
+ using LookupSet = std::set<StringRef>;
+ using LookupResult = std::map<StringRef, JITEvaluatedSymbol>;
+ using LookupFlagsResult = std::map<StringRef, JITSymbolFlags>;
+
+ virtual ~JITSymbolResolver() = default;
+
+ /// @brief Returns the fully resolved address and flags for each of the given
+ /// symbols.
+ ///
+ /// This method will return an error if any of the given symbols can not be
+ /// resolved, or if the resolution process itself triggers an error.
+ virtual Expected<LookupResult> lookup(const LookupSet &Symbols) = 0;
+
+ /// @brief Returns the symbol flags for each of the given symbols.
+ ///
+ /// This method does NOT return an error if any of the given symbols is
+ /// missing. Instead, that symbol will be left out of the result map.
+ virtual Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) = 0;
+
+private:
+ virtual void anchor();
+};
+
+/// \brief Legacy symbol resolution interface.
+class LegacyJITSymbolResolver : public JITSymbolResolver {
+public:
+  /// @brief For each symbol, performs lookup by first calling
+  /// findSymbolInLogicalDylib and, if that fails, calling
+  /// findSymbol.
+ Expected<LookupResult> lookup(const LookupSet &Symbols) final;
+
+ /// @brief Performs flags lookup by calling findSymbolInLogicalDylib and
+ /// returning the flags value for that symbol.
+ Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) final;
+
+ /// This method returns the address of the specified symbol if it exists
+ /// within the logical dynamic library represented by this JITSymbolResolver.
+ /// Unlike findSymbol, queries through this interface should return addresses
+ /// for hidden symbols.
+ ///
+ /// This is of particular importance for the Orc JIT APIs, which support lazy
+ /// compilation by breaking up modules: Each of those broken out modules
+ /// must be able to resolve hidden symbols provided by the others. Clients
+ /// writing memory managers for MCJIT can usually ignore this method.
+ ///
+ /// This method will be queried by RuntimeDyld when checking for previous
+ /// definitions of common symbols.
+ virtual JITSymbol findSymbolInLogicalDylib(const std::string &Name) = 0;
+
+ /// This method returns the address of the specified function or variable.
+ /// It is used to resolve symbols during module linking.
+ ///
+ /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
+ /// skip all relocations for that symbol, and the client will be responsible
+ /// for handling them manually.
+ virtual JITSymbol findSymbol(const std::string &Name) = 0;
+
+private:
+ virtual void anchor();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITSYMBOL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h b/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h
new file mode 100644
index 0000000..66ddb7c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/MCJIT.h
@@ -0,0 +1,38 @@
+//===-- MCJIT.h - MC-Based Just-In-Time Execution Engine --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the MCJIT to link in on certain operating systems
+// (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_MCJIT_H
+#define LLVM_EXECUTIONENGINE_MCJIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInMCJIT();
+
+namespace {
+ struct ForceMCJITLinking {
+ ForceMCJITLinking() {
+ // We must reference MCJIT in such a way that compilers will not
+ // delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough
+ // to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ LLVMLinkInMCJIT();
+ }
+ } ForceMCJITLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h b/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h
new file mode 100644
index 0000000..05da594
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/OProfileWrapper.h
@@ -0,0 +1,124 @@
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines an OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+#define LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+ typedef op_agent_t (*op_open_agent_ptr_t)();
+ typedef int (*op_close_agent_ptr_t)(op_agent_t);
+ typedef int (*op_write_native_code_ptr_t)(op_agent_t,
+ const char*,
+ uint64_t,
+ void const*,
+ const unsigned int);
+ typedef int (*op_write_debug_line_info_ptr_t)(op_agent_t,
+ void const*,
+ size_t,
+ struct debug_line_info const*);
+ typedef int (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+  // Also used for the op_minor_version function, which has the same signature.
+ typedef int (*op_major_version_ptr_t)();
+
+ // This is not a part of the opagent API, but is useful nonetheless
+ typedef bool (*IsOProfileRunningPtrT)();
+
+
+ op_agent_t Agent;
+ op_open_agent_ptr_t OpenAgentFunc;
+ op_close_agent_ptr_t CloseAgentFunc;
+ op_write_native_code_ptr_t WriteNativeCodeFunc;
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoFunc;
+ op_unload_native_code_ptr_t UnloadNativeCodeFunc;
+ op_major_version_ptr_t MajorVersionFunc;
+ op_major_version_ptr_t MinorVersionFunc;
+ IsOProfileRunningPtrT IsOProfileRunningFunc;
+
+ bool Initialized;
+
+public:
+ OProfileWrapper();
+
+ // For testing with a mock opagent implementation, skips the dynamic load and
+ // the function resolution.
+ OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+ op_close_agent_ptr_t CloseAgentImpl,
+ op_write_native_code_ptr_t WriteNativeCodeImpl,
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+ op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+ op_major_version_ptr_t MajorVersionImpl,
+ op_major_version_ptr_t MinorVersionImpl,
+ IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+ : OpenAgentFunc(OpenAgentImpl),
+ CloseAgentFunc(CloseAgentImpl),
+ WriteNativeCodeFunc(WriteNativeCodeImpl),
+ WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+ UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+ MajorVersionFunc(MajorVersionImpl),
+ MinorVersionFunc(MinorVersionImpl),
+ IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+ Initialized(true)
+ {
+ }
+
+ // Calls op_open_agent in the oprofile JIT library and saves the returned
+ // op_agent_t handle internally so it can be used when calling all the other
+ // op_* functions. Callers of this class do not need to keep track of
+ // op_agent_t objects.
+ bool op_open_agent();
+
+ int op_close_agent();
+ int op_write_native_code(const char* name,
+ uint64_t addr,
+ void const* code,
+ const unsigned int size);
+ int op_write_debug_line_info(void const* code,
+ size_t num_entries,
+ struct debug_line_info const* info);
+ int op_unload_native_code(uint64_t addr);
+ int op_major_version();
+ int op_minor_version();
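+
+  // For example (illustrative; FnAddr, FnCode and FnSize are placeholders for
+  // the JIT-ed function's address, code buffer and size):
+  //
+  //   OProfileWrapper Wrapper;
+  //   if (Wrapper.isAgentAvailable() && Wrapper.op_open_agent()) {
+  //     Wrapper.op_write_native_code("foo", FnAddr, FnCode, FnSize);
+  //     Wrapper.op_close_agent();
+  //   }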
+
+ // Returns true if the oprofiled process is running, the opagent library is
+ // loaded and a connection to the agent has been established, and false
+ // otherwise.
+ bool isAgentAvailable();
+
+private:
+ // Loads the libopagent library and initializes this wrapper if the oprofile
+ // daemon is running
+ bool initialize();
+
+  // Searches /proc for the oprofile daemon and returns true if the process is
+ // found, or false otherwise.
+ bool checkForOProfileProcEntry();
+
+ bool isOProfileRunning();
+};
+
+} // namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h
new file mode 100644
index 0000000..0770444
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectCache.h
@@ -0,0 +1,42 @@
+//===-- ObjectCache.h - Class definition for the ObjectCache ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+#define LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+
+/// This is the base ObjectCache type which can be provided to an
+/// ExecutionEngine for the purpose of avoiding compilation for Modules that
+/// have already been compiled and an object file is available.
+class ObjectCache {
+ virtual void anchor();
+
+public:
+ ObjectCache() = default;
+
+ virtual ~ObjectCache() = default;
+
+ /// notifyObjectCompiled - Provides a pointer to compiled code for Module M.
+ virtual void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) = 0;
+
+ /// Returns a pointer to a newly allocated MemoryBuffer that contains the
+ /// object which corresponds with Module M, or 0 if an object is not
+ /// available.
+ virtual std::unique_ptr<MemoryBuffer> getObject(const Module* M) = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OBJECTCACHE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
new file mode 100644
index 0000000..0f00ad0
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
@@ -0,0 +1,63 @@
+//===- ObjectMemoryBuffer.h - SmallVector-backed MemoryBuffer --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a wrapper class to hold the memory into which an
+// object will be generated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+#define LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// \brief SmallVector-backed MemoryBuffer instance.
+///
+/// This class enables efficient construction of MemoryBuffers from SmallVector
+/// instances. This is useful for MCJIT and Orc, where object files are streamed
+/// into SmallVectors, then inspected using ObjectFile (which takes a
+/// MemoryBuffer).
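+///
+/// For example (illustrative):
+/// \code
+///   SmallVector<char, 0> ObjBufferSV;
+///   raw_svector_ostream ObjStream(ObjBufferSV);
+///   // ... write an object file into ObjStream ...
+///   std::unique_ptr<MemoryBuffer> ObjBuffer(
+///       new ObjectMemoryBuffer(std::move(ObjBufferSV)));
+/// \endcode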
+class ObjectMemoryBuffer : public MemoryBuffer {
+public:
+
+ /// \brief Construct an ObjectMemoryBuffer from the given SmallVector r-value.
+ ///
+ /// FIXME: It'd be nice for this to be a non-templated constructor taking a
+ /// SmallVectorImpl here instead of a templated one taking a SmallVector<N>,
+ /// but SmallVector's move-construction/assignment currently only take
+ /// SmallVectors. If/when that is fixed we can simplify this constructor and
+ /// the following one.
+ ObjectMemoryBuffer(SmallVectorImpl<char> &&SV)
+ : SV(std::move(SV)), BufferName("<in-memory object>") {
+ init(this->SV.begin(), this->SV.end(), false);
+ }
+
+ /// \brief Construct a named ObjectMemoryBuffer from the given SmallVector
+ /// r-value and StringRef.
+ ObjectMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name)
+ : SV(std::move(SV)), BufferName(Name) {
+ init(this->SV.begin(), this->SV.end(), false);
+ }
+
+ StringRef getBufferIdentifier() const override { return BufferName; }
+
+ BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+
+private:
+ SmallVector<char, 0> SV;
+ std::string BufferName;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
new file mode 100644
index 0000000..a64a6dd
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -0,0 +1,681 @@
+//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// JIT layer for breaking up modules and inserting callbacks to allow
+// individual functions to be compiled on demand.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Value;
+
+namespace orc {
+
+/// @brief Compile-on-demand layer.
+///
+/// When a module is added to this layer a stub is created for each of its
+/// function definitions. The stubs and other global values are immediately
+/// added to the layer below. When a stub is called it triggers the extraction
+/// of the function body from the original module. The extracted body is then
+/// compiled and executed.
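+///
+/// A hedged construction sketch: ES, BaseLayer, GetResolver, SetResolver,
+/// CCMgr, TT and M are placeholders the client is assumed to have set up with
+/// the usual ORC utilities.
+/// \code
+///   CompileOnDemandLayer<decltype(BaseLayer)> CODLayer(
+///       ES, BaseLayer, GetResolver, SetResolver,
+///       [](Function &F) { return std::set<Function *>({&F}); },
+///       CCMgr, createLocalIndirectStubsManagerBuilder(TT));
+///   cantFail(CODLayer.addModule(ES.allocateVModule(), std::move(M)));
+/// \endcode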
+template <typename BaseLayerT,
+ typename CompileCallbackMgrT = JITCompileCallbackManager,
+ typename IndirectStubsMgrT = IndirectStubsManager>
+class CompileOnDemandLayer {
+private:
+ template <typename MaterializerFtor>
+ class LambdaMaterializer final : public ValueMaterializer {
+ public:
+ LambdaMaterializer(MaterializerFtor M) : M(std::move(M)) {}
+
+ Value *materialize(Value *V) final { return M(V); }
+
+ private:
+ MaterializerFtor M;
+ };
+
+ template <typename MaterializerFtor>
+ LambdaMaterializer<MaterializerFtor>
+ createLambdaMaterializer(MaterializerFtor M) {
+ return LambdaMaterializer<MaterializerFtor>(std::move(M));
+ }
+
+ // Provide type-erasure for the Modules and MemoryManagers.
+ template <typename ResourceT>
+ class ResourceOwner {
+ public:
+ ResourceOwner() = default;
+ ResourceOwner(const ResourceOwner &) = delete;
+ ResourceOwner &operator=(const ResourceOwner &) = delete;
+ virtual ~ResourceOwner() = default;
+
+ virtual ResourceT& getResource() const = 0;
+ };
+
+ template <typename ResourceT, typename ResourcePtrT>
+ class ResourceOwnerImpl : public ResourceOwner<ResourceT> {
+ public:
+ ResourceOwnerImpl(ResourcePtrT ResourcePtr)
+ : ResourcePtr(std::move(ResourcePtr)) {}
+
+ ResourceT& getResource() const override { return *ResourcePtr; }
+
+ private:
+ ResourcePtrT ResourcePtr;
+ };
+
+ template <typename ResourceT, typename ResourcePtrT>
+ std::unique_ptr<ResourceOwner<ResourceT>>
+ wrapOwnership(ResourcePtrT ResourcePtr) {
+ using RO = ResourceOwnerImpl<ResourceT, ResourcePtrT>;
+ return llvm::make_unique<RO>(std::move(ResourcePtr));
+ }
+
+ class StaticGlobalRenamer {
+ public:
+ StaticGlobalRenamer() = default;
+ StaticGlobalRenamer(StaticGlobalRenamer &&) = default;
+ StaticGlobalRenamer &operator=(StaticGlobalRenamer &&) = default;
+
+ void rename(Module &M) {
+ for (auto &F : M)
+ if (F.hasLocalLinkage())
+ F.setName("$static." + Twine(NextId++));
+ for (auto &G : M.globals())
+ if (G.hasLocalLinkage())
+ G.setName("$static." + Twine(NextId++));
+ }
+
+ private:
+ unsigned NextId = 0;
+ };
+
+ struct LogicalDylib {
+ struct SourceModuleEntry {
+ std::unique_ptr<Module> SourceMod;
+ std::set<Function*> StubsToClone;
+ };
+
+ using SourceModulesList = std::vector<SourceModuleEntry>;
+ using SourceModuleHandle = typename SourceModulesList::size_type;
+
+ LogicalDylib() = default;
+
+ LogicalDylib(VModuleKey K, std::shared_ptr<SymbolResolver> BackingResolver,
+ std::unique_ptr<IndirectStubsMgrT> StubsMgr)
+ : K(std::move(K)), BackingResolver(std::move(BackingResolver)),
+ StubsMgr(std::move(StubsMgr)) {}
+
+ SourceModuleHandle addSourceModule(std::unique_ptr<Module> M) {
+ SourceModuleHandle H = SourceModules.size();
+ SourceModules.push_back(SourceModuleEntry());
+ SourceModules.back().SourceMod = std::move(M);
+ return H;
+ }
+
+ Module& getSourceModule(SourceModuleHandle H) {
+ return *SourceModules[H].SourceMod;
+ }
+
+ std::set<Function*>& getStubsToClone(SourceModuleHandle H) {
+ return SourceModules[H].StubsToClone;
+ }
+
+ JITSymbol findSymbol(BaseLayerT &BaseLayer, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = StubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ for (auto BLK : BaseLayerVModuleKeys)
+ if (auto Sym = BaseLayer.findSymbolIn(BLK, Name, ExportedSymbolsOnly))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ return nullptr;
+ }
+
+ Error removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+ for (auto &BLK : BaseLayerVModuleKeys)
+ if (auto Err = BaseLayer.removeModule(BLK))
+ return Err;
+ return Error::success();
+ }
+
+ VModuleKey K;
+ std::shared_ptr<SymbolResolver> BackingResolver;
+ std::unique_ptr<IndirectStubsMgrT> StubsMgr;
+ StaticGlobalRenamer StaticRenamer;
+ SourceModulesList SourceModules;
+ std::vector<VModuleKey> BaseLayerVModuleKeys;
+ };
+
+public:
+
+ /// @brief Module partitioning functor.
+ using PartitioningFtor = std::function<std::set<Function*>(Function&)>;
+
+ /// @brief Builder for IndirectStubsManagers.
+ using IndirectStubsManagerBuilderT =
+ std::function<std::unique_ptr<IndirectStubsMgrT>()>;
+
+ using SymbolResolverGetter =
+ std::function<std::shared_ptr<SymbolResolver>(VModuleKey K)>;
+
+ using SymbolResolverSetter =
+ std::function<void(VModuleKey K, std::shared_ptr<SymbolResolver> R)>;
+
+ /// @brief Construct a compile-on-demand layer instance.
+ CompileOnDemandLayer(ExecutionSession &ES, BaseLayerT &BaseLayer,
+ SymbolResolverGetter GetSymbolResolver,
+ SymbolResolverSetter SetSymbolResolver,
+ PartitioningFtor Partition,
+ CompileCallbackMgrT &CallbackMgr,
+ IndirectStubsManagerBuilderT CreateIndirectStubsManager,
+ bool CloneStubsIntoPartitions = true)
+ : ES(ES), BaseLayer(BaseLayer),
+ GetSymbolResolver(std::move(GetSymbolResolver)),
+ SetSymbolResolver(std::move(SetSymbolResolver)),
+ Partition(std::move(Partition)), CompileCallbackMgr(CallbackMgr),
+ CreateIndirectStubsManager(std::move(CreateIndirectStubsManager)),
+ CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
+
+ ~CompileOnDemandLayer() {
+ // FIXME: Report error on log.
+ while (!LogicalDylibs.empty())
+ consumeError(removeModule(LogicalDylibs.begin()->first));
+ }
+
+ /// @brief Add a module to the compile-on-demand layer.
+ Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+
+ assert(!LogicalDylibs.count(K) && "VModuleKey K already in use");
+ auto I = LogicalDylibs.insert(
+ LogicalDylibs.end(),
+ std::make_pair(K, LogicalDylib(K, GetSymbolResolver(K),
+ CreateIndirectStubsManager())));
+
+ return addLogicalModule(I->second, std::move(M));
+ }
+
+ /// @brief Add extra modules to an existing logical module.
+ Error addExtraModule(VModuleKey K, std::unique_ptr<Module> M) {
+ return addLogicalModule(LogicalDylibs[K], std::move(M));
+ }
+
+ /// @brief Remove the module represented by the given key.
+ ///
+ /// This will remove all modules in the layers below that were derived from
+ /// the module represented by K.
+ Error removeModule(VModuleKey K) {
+ auto I = LogicalDylibs.find(K);
+ assert(I != LogicalDylibs.end() && "VModuleKey K not valid here");
+ auto Err = I->second.removeModulesFromBaseLayer(BaseLayer);
+ LogicalDylibs.erase(I);
+ return Err;
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ for (auto &KV : LogicalDylibs) {
+ if (auto Sym = KV.second.StubsMgr->findStub(Name, ExportedSymbolsOnly))
+ return Sym;
+ if (auto Sym = findSymbolIn(KV.first, Name, ExportedSymbolsOnly))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ }
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of a symbol provided by this layer, or some layer
+ /// below this one.
+ JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ assert(LogicalDylibs.count(K) && "VModuleKey K is not valid here");
+ return LogicalDylibs[K].findSymbol(BaseLayer, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Update the stub for the given function to point at FnBodyAddr.
+ /// This can be used to support re-optimization.
+ /// @return Error::success() if the function exists and the stub was updated,
+ /// an error otherwise.
+ //
+ // FIXME: We should track and free associated resources (unused compile
+ // callbacks, uncompiled IR, and no-longer-needed/reachable function
+ // implementations).
+ Error updatePointer(std::string FuncName, JITTargetAddress FnBodyAddr) {
+ // Find out which logical dylib contains our symbol.
+ auto LDI = LogicalDylibs.begin();
+ for (auto LDE = LogicalDylibs.end(); LDI != LDE; ++LDI) {
+ if (auto LMResources =
+ LDI->getLogicalModuleResourcesForSymbol(FuncName, false)) {
+ Module &SrcM = LMResources->SourceModule->getResource();
+ std::string CalledFnName = mangle(FuncName, SrcM.getDataLayout());
+ if (auto Err = LMResources->StubsMgr->updatePointer(CalledFnName,
+ FnBodyAddr))
+ return Err;
+ return Error::success();
+ }
+ }
+ return make_error<JITSymbolNotFound>(FuncName);
+ }
+
+private:
+ Error addLogicalModule(LogicalDylib &LD, std::unique_ptr<Module> SrcMPtr) {
+
+ // Rename all static functions / globals to $static.X :
+ // This will unique the names across all modules in the logical dylib,
+ // simplifying symbol lookup.
+ LD.StaticRenamer.rename(*SrcMPtr);
+
+ // Bump the linkage and rename any anonymous/private members in SrcM to
+ // ensure that everything will resolve properly after we partition SrcM.
+ makeAllSymbolsExternallyAccessible(*SrcMPtr);
+
+ // Create a logical module handle for SrcM within the logical dylib.
+ Module &SrcM = *SrcMPtr;
+ auto LMId = LD.addSourceModule(std::move(SrcMPtr));
+
+ // Create stub functions.
+ const DataLayout &DL = SrcM.getDataLayout();
+ {
+ typename IndirectStubsMgrT::StubInitsMap StubInits;
+ for (auto &F : SrcM) {
+ // Skip declarations.
+ if (F.isDeclaration())
+ continue;
+
+ // Skip weak functions for which we already have definitions.
+ auto MangledName = mangle(F.getName(), DL);
+ if (F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
+ if (auto Sym = LD.findSymbol(BaseLayer, MangledName, false))
+ continue;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ }
+
+ // Record all functions defined by this module.
+ if (CloneStubsIntoPartitions)
+ LD.getStubsToClone(LMId).insert(&F);
+
+ // Create a callback, associate it with the stub for the function,
+ // and set the compile action to compile the partition containing the
+ // function.
+ if (auto CCInfoOrErr = CompileCallbackMgr.getCompileCallback()) {
+ auto &CCInfo = *CCInfoOrErr;
+ StubInits[MangledName] =
+ std::make_pair(CCInfo.getAddress(),
+ JITSymbolFlags::fromGlobalValue(F));
+ CCInfo.setCompileAction([this, &LD, LMId, &F]() -> JITTargetAddress {
+ if (auto FnImplAddrOrErr = this->extractAndCompile(LD, LMId, F))
+ return *FnImplAddrOrErr;
+ else {
+ // FIXME: Report error, return to 'abort' or something similar.
+ consumeError(FnImplAddrOrErr.takeError());
+ return 0;
+ }
+ });
+ } else
+ return CCInfoOrErr.takeError();
+ }
+
+ if (auto Err = LD.StubsMgr->createStubs(StubInits))
+ return Err;
+ }
+
+ // If this module doesn't contain any globals, aliases, or module flags then
+ // we can bail out early and avoid the overhead of creating and managing an
+ // empty globals module.
+ if (SrcM.global_empty() && SrcM.alias_empty() &&
+ !SrcM.getModuleFlagsMetadata())
+ return Error::success();
+
+ // Create the GlobalValues module.
+ auto GVsM = llvm::make_unique<Module>((SrcM.getName() + ".globals").str(),
+ SrcM.getContext());
+ GVsM->setDataLayout(DL);
+
+ ValueToValueMapTy VMap;
+
+ // Clone global variable decls.
+ for (auto &GV : SrcM.globals())
+ if (!GV.isDeclaration() && !VMap.count(&GV))
+ cloneGlobalVariableDecl(*GVsM, GV, &VMap);
+
+ // And the aliases.
+ for (auto &A : SrcM.aliases())
+ if (!VMap.count(&A))
+ cloneGlobalAliasDecl(*GVsM, A, VMap);
+
+ // Clone the module flags.
+ cloneModuleFlagsMetadata(*GVsM, SrcM, VMap);
+
+ // Now we need to clone the GV and alias initializers.
+
+ // Initializers may refer to functions declared (but not defined) in this
+ // module. Build a materializer to clone decls on demand.
+ Error MaterializerErrors = Error::success();
+ auto Materializer = createLambdaMaterializer(
+ [&LD, &GVsM, &MaterializerErrors](Value *V) -> Value* {
+ if (auto *F = dyn_cast<Function>(V)) {
+ // Decls in the original module just get cloned.
+ if (F->isDeclaration())
+ return cloneFunctionDecl(*GVsM, *F);
+
+ // Definitions in the original module (which we have emitted stubs
+ // for at this point) get turned into a constant alias to the stub
+ // instead.
+ const DataLayout &DL = GVsM->getDataLayout();
+ std::string FName = mangle(F->getName(), DL);
+ unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
+ JITTargetAddress StubAddr = 0;
+
+ // Get the address for the stub. If we encounter an error while
+ // doing so, stash it in the MaterializerErrors variable and use a
+ // null address as a placeholder.
+ if (auto StubSym = LD.StubsMgr->findStub(FName, false)) {
+ if (auto StubAddrOrErr = StubSym.getAddress())
+ StubAddr = *StubAddrOrErr;
+ else
+ MaterializerErrors = joinErrors(std::move(MaterializerErrors),
+ StubAddrOrErr.takeError());
+ }
+
+ ConstantInt *StubAddrCI =
+ ConstantInt::get(GVsM->getContext(), APInt(PtrBitWidth, StubAddr));
+ Constant *Init = ConstantExpr::getCast(Instruction::IntToPtr,
+ StubAddrCI, F->getType());
+ return GlobalAlias::create(F->getFunctionType(),
+ F->getType()->getAddressSpace(),
+ F->getLinkage(), F->getName(),
+ Init, GVsM.get());
+ }
+ // else....
+ return nullptr;
+ });
+
+ // Clone the global variable initializers.
+ for (auto &GV : SrcM.globals())
+ if (!GV.isDeclaration())
+ moveGlobalVariableInitializer(GV, VMap, &Materializer);
+
+ // Clone the global alias initializers.
+ for (auto &A : SrcM.aliases()) {
+ auto *NewA = cast<GlobalAlias>(VMap[&A]);
+ assert(NewA && "Alias not cloned?");
+ Value *Init = MapValue(A.getAliasee(), VMap, RF_None, nullptr,
+ &Materializer);
+ NewA->setAliasee(cast<Constant>(Init));
+ }
+
+ if (MaterializerErrors)
+ return MaterializerErrors;
+
+ // Build a resolver for the globals module and add it to the base layer.
+ auto LegacyLookup = [this, &LD](const std::string &Name) -> JITSymbol {
+ if (auto Sym = LD.StubsMgr->findStub(Name, false))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+
+ if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+
+ return nullptr;
+ };
+
+ auto GVsResolver = createSymbolResolver(
+ [&LD, LegacyLookup](SymbolFlagsMap &SymbolFlags,
+ const SymbolNameSet &Symbols) {
+ auto NotFoundViaLegacyLookup =
+ lookupFlagsWithLegacyFn(SymbolFlags, Symbols, LegacyLookup);
+
+ if (!NotFoundViaLegacyLookup) {
+ logAllUnhandledErrors(NotFoundViaLegacyLookup.takeError(), errs(),
+ "CODLayer/GVsResolver flags lookup failed: ");
+ SymbolFlags.clear();
+ return SymbolNameSet();
+ }
+
+ return LD.BackingResolver->lookupFlags(SymbolFlags,
+ *NotFoundViaLegacyLookup);
+ },
+ [&LD, LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) {
+ auto NotFoundViaLegacyLookup =
+ lookupWithLegacyFn(*Query, Symbols, LegacyLookup);
+ return LD.BackingResolver->lookup(Query, NotFoundViaLegacyLookup);
+ });
+
+ SetSymbolResolver(LD.K, std::move(GVsResolver));
+
+ if (auto Err = BaseLayer.addModule(LD.K, std::move(GVsM)))
+ return Err;
+
+ LD.BaseLayerVModuleKeys.push_back(LD.K);
+
+ return Error::success();
+ }
+
+ static std::string mangle(StringRef Name, const DataLayout &DL) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return MangledName;
+ }
+
+ Expected<JITTargetAddress>
+ extractAndCompile(LogicalDylib &LD,
+ typename LogicalDylib::SourceModuleHandle LMId,
+ Function &F) {
+ Module &SrcM = LD.getSourceModule(LMId);
+
+ // If F is a declaration we must already have compiled it.
+ if (F.isDeclaration())
+ return 0;
+
+ // Grab the name of the function being called here.
+ std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
+
+ JITTargetAddress CalledAddr = 0;
+ auto Part = Partition(F);
+ if (auto PartKeyOrErr = emitPartition(LD, LMId, Part)) {
+ auto &PartKey = *PartKeyOrErr;
+ for (auto *SubF : Part) {
+ std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
+ if (auto FnBodySym = BaseLayer.findSymbolIn(PartKey, FnName, false)) {
+ if (auto FnBodyAddrOrErr = FnBodySym.getAddress()) {
+ JITTargetAddress FnBodyAddr = *FnBodyAddrOrErr;
+
+ // If this is the function we're calling, record the address so we can
+ // return it from this function.
+ if (SubF == &F)
+ CalledAddr = FnBodyAddr;
+
+ // Update the function body pointer for the stub.
+ if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
+ return 0;
+
+ } else
+ return FnBodyAddrOrErr.takeError();
+ } else if (auto Err = FnBodySym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Function not emitted for partition");
+ }
+
+ LD.BaseLayerVModuleKeys.push_back(PartKey);
+ } else
+ return PartKeyOrErr.takeError();
+
+ return CalledAddr;
+ }
+
+ template <typename PartitionT>
+ Expected<VModuleKey>
+ emitPartition(LogicalDylib &LD,
+ typename LogicalDylib::SourceModuleHandle LMId,
+ const PartitionT &Part) {
+ Module &SrcM = LD.getSourceModule(LMId);
+
+ // Create the module.
+ std::string NewName = SrcM.getName();
+ for (auto *F : Part) {
+ NewName += ".";
+ NewName += F->getName();
+ }
+
+ auto M = llvm::make_unique<Module>(NewName, SrcM.getContext());
+ M->setDataLayout(SrcM.getDataLayout());
+ ValueToValueMapTy VMap;
+
+ auto Materializer = createLambdaMaterializer([&LD, &LMId,
+ &M](Value *V) -> Value * {
+ if (auto *GV = dyn_cast<GlobalVariable>(V))
+ return cloneGlobalVariableDecl(*M, *GV);
+
+ if (auto *F = dyn_cast<Function>(V)) {
+ // Check whether we want to clone an available_externally definition.
+ if (!LD.getStubsToClone(LMId).count(F))
+ return cloneFunctionDecl(*M, *F);
+
+ // Ok - we want an inlinable stub. For that to work we need a decl
+ // for the stub pointer.
+ auto *StubPtr = createImplPointer(*F->getType(), *M,
+ F->getName() + "$stub_ptr", nullptr);
+ auto *ClonedF = cloneFunctionDecl(*M, *F);
+ makeStub(*ClonedF, *StubPtr);
+ ClonedF->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ ClonedF->addFnAttr(Attribute::AlwaysInline);
+ return ClonedF;
+ }
+
+ if (auto *A = dyn_cast<GlobalAlias>(V)) {
+ auto *Ty = A->getValueType();
+ if (Ty->isFunctionTy())
+ return Function::Create(cast<FunctionType>(Ty),
+ GlobalValue::ExternalLinkage, A->getName(),
+ M.get());
+
+ return new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
+ nullptr, A->getName(), nullptr,
+ GlobalValue::NotThreadLocal,
+ A->getType()->getAddressSpace());
+ }
+
+ return nullptr;
+ });
+
+ // Create decls in the new module.
+ for (auto *F : Part)
+ cloneFunctionDecl(*M, *F, &VMap);
+
+ // Move the function bodies.
+ for (auto *F : Part)
+ moveFunctionBody(*F, VMap, &Materializer);
+
+ auto K = ES.allocateVModule();
+
+ auto LegacyLookup = [this, &LD](const std::string &Name) -> JITSymbol {
+ return LD.findSymbol(BaseLayer, Name, false);
+ };
+
+ // Create memory manager and symbol resolver.
+ auto Resolver = createSymbolResolver(
+ [&LD, LegacyLookup](SymbolFlagsMap &SymbolFlags,
+ const SymbolNameSet &Symbols) {
+ auto NotFoundViaLegacyLookup =
+ lookupFlagsWithLegacyFn(SymbolFlags, Symbols, LegacyLookup);
+ if (!NotFoundViaLegacyLookup) {
+ logAllUnhandledErrors(NotFoundViaLegacyLookup.takeError(), errs(),
+ "CODLayer/SubResolver flags lookup failed: ");
+ SymbolFlags.clear();
+ return SymbolNameSet();
+ }
+ return LD.BackingResolver->lookupFlags(SymbolFlags,
+ *NotFoundViaLegacyLookup);
+ },
+ [&LD, LegacyLookup](std::shared_ptr<AsynchronousSymbolQuery> Q,
+ SymbolNameSet Symbols) {
+ auto NotFoundViaLegacyLookup =
+ lookupWithLegacyFn(*Q, Symbols, LegacyLookup);
+ return LD.BackingResolver->lookup(Q,
+ std::move(NotFoundViaLegacyLookup));
+ });
+ SetSymbolResolver(K, std::move(Resolver));
+
+ if (auto Err = BaseLayer.addModule(std::move(K), std::move(M)))
+ return std::move(Err);
+
+ return K;
+ }
+
+ ExecutionSession &ES;
+ BaseLayerT &BaseLayer;
+ SymbolResolverGetter GetSymbolResolver;
+ SymbolResolverSetter SetSymbolResolver;
+ PartitioningFtor Partition;
+ CompileCallbackMgrT &CompileCallbackMgr;
+ IndirectStubsManagerBuilderT CreateIndirectStubsManager;
+
+ std::map<VModuleKey, LogicalDylib> LogicalDylibs;
+ bool CloneStubsIntoPartitions;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h
new file mode 100644
index 0000000..a8050ff
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -0,0 +1,121 @@
+//===- CompileUtils.h - Utilities for compiling IR in the JIT ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for compiling IR to object files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+class MCContext;
+class Module;
+
+namespace orc {
+
+/// @brief Simple compile functor: Takes a single IR module and returns an
+/// in-memory object file (as a MemoryBuffer).
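+///
+/// A brief usage sketch; TM and M are assumed to be a TargetMachine and Module
+/// owned by the caller, and EngineBuilder is just one way to obtain a TM:
+/// \code
+///   std::unique_ptr<TargetMachine> TM(EngineBuilder().selectTarget());
+///   SimpleCompiler Compile(*TM);
+///   std::unique_ptr<MemoryBuffer> Obj = Compile(M);
+/// \endcode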
+class SimpleCompiler {
+private:
+ class SmallVectorMemoryBuffer : public MemoryBuffer {
+ public:
+ SmallVectorMemoryBuffer(SmallVector<char, 0> Buffer)
+ : Buffer(std::move(Buffer)) {
+ init(this->Buffer.data(), this->Buffer.data() + this->Buffer.size(),
+ false);
+ }
+
+ BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+
+ private:
+ SmallVector<char, 0> Buffer;
+ };
+
+public:
+ using CompileResult = std::unique_ptr<MemoryBuffer>;
+
+ /// @brief Construct a simple compile functor with the given target.
+ SimpleCompiler(TargetMachine &TM, ObjectCache *ObjCache = nullptr)
+ : TM(TM), ObjCache(ObjCache) {}
+
+ /// @brief Set an ObjectCache to query before compiling.
+ void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
+
+ /// @brief Compile a Module to an in-memory object file.
+ CompileResult operator()(Module &M) {
+ CompileResult CachedObject = tryToLoadFromObjectCache(M);
+ if (CachedObject)
+ return CachedObject;
+
+ SmallVector<char, 0> ObjBufferSV;
+
+ {
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ llvm_unreachable("Target does not support MC emission.");
+ PM.run(M);
+ }
+
+ auto ObjBuffer =
+ llvm::make_unique<SmallVectorMemoryBuffer>(std::move(ObjBufferSV));
+ auto Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+ if (Obj) {
+ notifyObjectCompiled(M, *ObjBuffer);
+ return std::move(ObjBuffer);
+ }
+
+ // TODO: Actually report errors helpfully.
+ consumeError(Obj.takeError());
+ return nullptr;
+ }
+
+private:
+
+ CompileResult tryToLoadFromObjectCache(const Module &M) {
+ if (!ObjCache)
+ return CompileResult();
+
+ return ObjCache->getObject(&M);
+ }
+
+ void notifyObjectCompiled(const Module &M, const MemoryBuffer &ObjBuffer) {
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+ }
+
+ TargetMachine &TM;
+ ObjectCache *ObjCache = nullptr;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h
new file mode 100644
index 0000000..26fec8b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Core.h
@@ -0,0 +1,394 @@
+//===------ Core.h -- Core ORC APIs (Layer, JITDylib, etc.) -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains core ORC APIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_CORE_H
+#define LLVM_EXECUTIONENGINE_ORC_CORE_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+/// VModuleKey provides a unique identifier (allocated and managed by
+/// ExecutionSessions) for a module added to the JIT.
+using VModuleKey = uint64_t;
+
+class VSO;
+
+/// @brief A set of symbol names (represented by SymbolStringPtrs for
+/// efficiency).
+using SymbolNameSet = std::set<SymbolStringPtr>;
+
+/// @brief A map from symbol names (as SymbolStringPtrs) to JITSymbols
+/// (address/flags pairs).
+using SymbolMap = std::map<SymbolStringPtr, JITEvaluatedSymbol>;
+
+/// @brief A map from symbol names (as SymbolStringPtrs) to JITSymbolFlags.
+using SymbolFlagsMap = std::map<SymbolStringPtr, JITSymbolFlags>;
+
+/// @brief A symbol query that returns results via a callback when results are
+/// ready.
+///
+/// The query invokes its callbacks once all requested symbols are available.
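+///
+/// For illustration only, a query that just logs what it receives; Names is an
+/// assumed SymbolNameSet:
+/// \code
+///   auto Q = std::make_shared<AsynchronousSymbolQuery>(
+///       Names,
+///       [](Expected<SymbolMap> R) {
+///         if (R)
+///           errs() << "resolved " << R->size() << " symbols\n";
+///         else
+///           logAllUnhandledErrors(R.takeError(), errs(), "resolve failed: ");
+///       },
+///       [](Error Err) {
+///         if (Err)
+///           logAllUnhandledErrors(std::move(Err), errs(), "finalize failed: ");
+///       });
+/// \endcode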
+class AsynchronousSymbolQuery {
+public:
+ /// @brief Callback to notify client that symbols have been resolved.
+ using SymbolsResolvedCallback = std::function<void(Expected<SymbolMap>)>;
+
+ /// @brief Callback to notify client that symbols are ready for execution.
+ using SymbolsReadyCallback = std::function<void(Error)>;
+
+ /// @brief Create a query for the given symbols, notify-resolved and
+ /// notify-ready callbacks.
+ AsynchronousSymbolQuery(const SymbolNameSet &Symbols,
+ SymbolsResolvedCallback NotifySymbolsResolved,
+ SymbolsReadyCallback NotifySymbolsReady);
+
+ /// @brief Notify client that the query failed.
+ ///
+ /// If the notify-resolved callback has not been made yet, then it is called
+ /// with the given error, and the notify-ready callback is never made.
+ ///
+ /// If the notify-resolved callback has already been made then the
+ /// notify-ready callback is called with the given error.
+ ///
+ /// It is illegal to call setFailed after both callbacks have been made.
+ void setFailed(Error Err);
+
+ /// @brief Set the resolved symbol information for the given symbol name.
+ ///
+ /// If this symbol was the last one not resolved, this will trigger a call to
+ /// the notify-resolved callback passing the completed symbol map.
+ void setDefinition(SymbolStringPtr Name, JITEvaluatedSymbol Sym);
+
+ /// @brief Notify the query that a requested symbol is ready for execution.
+ ///
+ /// This decrements the query's internal count of not-yet-ready symbols. If
+ /// this call to notifySymbolFinalized sets the counter to zero, it will call
+ /// the notify-ready callback with Error::success as the value.
+ void notifySymbolFinalized();
+
+private:
+ SymbolMap Symbols;
+ size_t OutstandingResolutions = 0;
+ size_t OutstandingFinalizations = 0;
+ SymbolsResolvedCallback NotifySymbolsResolved;
+ SymbolsReadyCallback NotifySymbolsReady;
+};
+
+/// @brief SymbolResolver is a composable interface for looking up symbol flags
+/// and addresses using the AsynchronousSymbolQuery type. It will
+/// eventually replace the LegacyJITSymbolResolver interface as the
+/// standard ORC symbol resolver type.
+class SymbolResolver {
+public:
+ virtual ~SymbolResolver() = default;
+
+ /// @brief Returns the flags for each symbol in Symbols that can be found,
+ /// along with the set of symbols that could not be found.
+ virtual SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+ const SymbolNameSet &Symbols) = 0;
+
+ /// @brief For each symbol in Symbols that can be found, assigns that symbol's
+ /// value in Query. Returns the set of symbols that could not be found.
+ virtual SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) = 0;
+
+private:
+ virtual void anchor();
+};
+
+/// @brief Implements SymbolResolver with a pair of supplied function objects
+/// for convenience. See createSymbolResolver.
+template <typename LookupFlagsFn, typename LookupFn>
+class LambdaSymbolResolver final : public SymbolResolver {
+public:
+ template <typename LookupFlagsFnRef, typename LookupFnRef>
+ LambdaSymbolResolver(LookupFlagsFnRef &&LookupFlags, LookupFnRef &&Lookup)
+ : LookupFlags(std::forward<LookupFlagsFnRef>(LookupFlags)),
+ Lookup(std::forward<LookupFnRef>(Lookup)) {}
+
+ SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+ const SymbolNameSet &Symbols) final {
+ return LookupFlags(Flags, Symbols);
+ }
+
+ SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) final {
+ return Lookup(std::move(Query), std::move(Symbols));
+ }
+
+private:
+ LookupFlagsFn LookupFlags;
+ LookupFn Lookup;
+};
+
+/// @brief Creates a SymbolResolver implementation from the pair of supplied
+/// function objects.
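+///
+/// A hedged sketch: a resolver backed by a plain SymbolMap named Syms (an
+/// assumption for illustration; weak-linkage and error handling are omitted):
+/// \code
+///   auto R = createSymbolResolver(
+///       [&](SymbolFlagsMap &Flags, const SymbolNameSet &Names) {
+///         SymbolNameSet NotFound;
+///         for (auto &Name : Names) {
+///           auto I = Syms.find(Name);
+///           if (I != Syms.end())
+///             Flags[Name] = I->second.getFlags();
+///           else
+///             NotFound.insert(Name);
+///         }
+///         return NotFound;
+///       },
+///       [&](std::shared_ptr<AsynchronousSymbolQuery> Q, SymbolNameSet Names) {
+///         SymbolNameSet NotFound;
+///         for (auto &Name : Names) {
+///           auto I = Syms.find(Name);
+///           if (I == Syms.end()) {
+///             NotFound.insert(Name);
+///             continue;
+///           }
+///           Q->setDefinition(Name, I->second);
+///           Q->notifySymbolFinalized();
+///         }
+///         return NotFound;
+///       });
+/// \endcode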
+template <typename LookupFlagsFn, typename LookupFn>
+std::unique_ptr<LambdaSymbolResolver<
+ typename std::remove_cv<
+ typename std::remove_reference<LookupFlagsFn>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<LookupFn>::type>::type>>
+createSymbolResolver(LookupFlagsFn &&LookupFlags, LookupFn &&Lookup) {
+ using LambdaSymbolResolverImpl = LambdaSymbolResolver<
+ typename std::remove_cv<
+ typename std::remove_reference<LookupFlagsFn>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<LookupFn>::type>::type>;
+ return llvm::make_unique<LambdaSymbolResolverImpl>(
+ std::forward<LookupFlagsFn>(LookupFlags), std::forward<LookupFn>(Lookup));
+}
+
+/// @brief A MaterializationUnit represents a set of symbol definitions that can
+/// be materialized as a group, or individually discarded (when
+/// overriding definitions are encountered).
+///
+/// MaterializationUnits are used when providing lazy definitions of symbols to
+/// VSOs. The VSO will call materialize when the address of a symbol is
+/// requested via the lookup method. The VSO will call discard if a stronger
+/// definition is added or already present.
+class MaterializationUnit {
+public:
+ virtual ~MaterializationUnit() {}
+
+ /// @brief Return the set of symbols that this source provides.
+ virtual SymbolFlagsMap getSymbols() = 0;
+
+ /// @brief Implementations of this method should materialize all symbols
+ /// in the materialization unit, except for those that have been
+ /// previously discarded.
+ virtual Error materialize(VSO &V) = 0;
+
+ /// @brief Implementations of this method should discard the given symbol
+ /// from the source (e.g. if the source is an LLVM IR Module and the
+ /// symbol is a function, delete the function body or mark it available
+ /// externally).
+ virtual void discard(VSO &V, SymbolStringPtr Name) = 0;
+
+private:
+ virtual void anchor();
+};
+
+/// @brief Represents a dynamic linkage unit in a JIT process.
+///
+/// VSO acts as a symbol table (symbol definitions can be set and the dylib
+/// queried to find symbol addresses) and as a key for tracking resources
+/// (since a VSO's address is fixed).
+class VSO {
+ friend class ExecutionSession;
+
+public:
+ enum RelativeLinkageStrength {
+ NewDefinitionIsStronger,
+ DuplicateDefinition,
+ ExistingDefinitionIsStronger
+ };
+
+ using SetDefinitionsResult =
+ std::map<SymbolStringPtr, RelativeLinkageStrength>;
+
+ using MaterializationUnitList =
+ std::vector<std::unique_ptr<MaterializationUnit>>;
+
+ struct LookupResult {
+ MaterializationUnitList MaterializationUnits;
+ SymbolNameSet UnresolvedSymbols;
+ };
+
+ VSO() = default;
+
+ VSO(const VSO &) = delete;
+ VSO &operator=(const VSO &) = delete;
+ VSO(VSO &&) = delete;
+ VSO &operator=(VSO &&) = delete;
+
+ /// @brief Compare new linkage with existing linkage.
+ static RelativeLinkageStrength
+ compareLinkage(Optional<JITSymbolFlags> OldFlags, JITSymbolFlags NewFlags);
+
+ /// @brief Compare new linkage with an existing symbol's linkage.
+ RelativeLinkageStrength compareLinkage(SymbolStringPtr Name,
+ JITSymbolFlags NewFlags) const;
+
+ /// @brief Adds the given symbols to the mapping as resolved, finalized
+ /// symbols.
+ ///
+ /// FIXME: We can take this by const-ref once symbol-based laziness is
+ /// removed.
+ Error define(SymbolMap NewSymbols);
+
+ /// @brief Adds the given symbols to the mapping as lazy symbols.
+ Error defineLazy(std::unique_ptr<MaterializationUnit> Source);
+
+ /// @brief Add the given symbol/address mappings to the dylib, but do not
+ /// mark the symbols as finalized yet.
+ void resolve(SymbolMap SymbolValues);
+
+ /// @brief Finalize the given symbols.
+ void finalize(SymbolNameSet SymbolsToFinalize);
+
+ /// @brief Look up the flags for the given symbols.
+ ///
+ /// Returns the flags for the given symbols, together with the set of symbols
+ /// not found.
+ SymbolNameSet lookupFlags(SymbolFlagsMap &Flags, SymbolNameSet Symbols);
+
+ /// @brief Apply the given query to the given symbols in this VSO.
+ ///
+ /// For symbols in this VSO that have already been materialized, their address
+ /// will be set in the query immediately.
+ ///
+ /// For symbols in this VSO that have not been materialized, the query will be
+ /// recorded and the source for those symbols (plus the set of symbols to be
+ /// materialized by that source) will be returned in the MaterializationUnits
+ /// field of the LookupResult.
+ ///
+ /// Any symbols not found in this VSO will be returned in the
+ /// UnresolvedSymbols field of the LookupResult.
+ LookupResult lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols);
+
+private:
+ class MaterializationInfo {
+ public:
+ using QueryList = std::vector<std::shared_ptr<AsynchronousSymbolQuery>>;
+
+ MaterializationInfo(size_t SymbolsRemaining,
+ std::unique_ptr<MaterializationUnit> MU);
+
+ uint64_t SymbolsRemaining;
+ std::unique_ptr<MaterializationUnit> MU;
+ SymbolMap Symbols;
+ std::map<SymbolStringPtr, QueryList> PendingResolution;
+ std::map<SymbolStringPtr, QueryList> PendingFinalization;
+ };
+
+ using MaterializationInfoSet = std::set<std::unique_ptr<MaterializationInfo>>;
+
+ using MaterializationInfoIterator = MaterializationInfoSet::iterator;
+
+ class SymbolTableEntry {
+ public:
+ SymbolTableEntry(JITSymbolFlags SymbolFlags,
+ MaterializationInfoIterator MaterializationInfoItr);
+ SymbolTableEntry(JITEvaluatedSymbol Sym);
+ SymbolTableEntry(SymbolTableEntry &&Other);
+ ~SymbolTableEntry();
+
+ SymbolTableEntry &operator=(JITEvaluatedSymbol Sym);
+
+ JITSymbolFlags getFlags() const;
+ void replaceWith(VSO &V, SymbolStringPtr Name, JITSymbolFlags Flags,
+ MaterializationInfoIterator NewMaterializationInfoItr);
+ std::unique_ptr<MaterializationUnit>
+ query(SymbolStringPtr Name, std::shared_ptr<AsynchronousSymbolQuery> Query);
+ void resolve(VSO &V, SymbolStringPtr Name, JITEvaluatedSymbol Sym);
+ void finalize(VSO &V, SymbolStringPtr Name);
+ void discard(VSO &V, SymbolStringPtr Name);
+
+ private:
+ void destroy();
+
+ JITSymbolFlags Flags;
+ MaterializationInfoIterator MII;
+ union {
+ JITTargetAddress Address;
+ MaterializationInfoIterator MaterializationInfoItr;
+ };
+ };
+
+ std::map<SymbolStringPtr, SymbolTableEntry> Symbols;
+ MaterializationInfoSet MaterializationInfos;
+};
+
+/// @brief An ExecutionSession represents a running JIT program.
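+///
+/// A minimal setup sketch (the string pool must outlive the session, which
+/// holds a reference to it):
+/// \code
+///   SymbolStringPool SSP;
+///   ExecutionSession ES(SSP);
+///   VModuleKey K = ES.allocateVModule();
+/// \endcode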
+class ExecutionSession {
+public:
+ using ErrorReporter = std::function<void(Error)>;
+
+ /// @brief Construct an ExecutionSession.
+ ///
+ /// SymbolStringPools may be shared between ExecutionSessions.
+ ExecutionSession(SymbolStringPool &SSP);
+
+ /// @brief Returns the SymbolStringPool for this ExecutionSession.
+ SymbolStringPool &getSymbolStringPool() const { return SSP; }
+
+ /// @brief Set the error reporter function.
+ void setErrorReporter(ErrorReporter ReportError) {
+ this->ReportError = std::move(ReportError);
+ }
+
+ /// @brief Report an error for this execution session.
+ ///
+ /// Unhandled errors can be sent here to log them.
+ void reportError(Error Err) { ReportError(std::move(Err)); }
+
+ /// @brief Allocate a module key for a new module to add to the JIT.
+ VModuleKey allocateVModule();
+
+ /// @brief Return a module key to the ExecutionSession so that it can be
+ /// re-used. This should only be done once all resources associated
+ /// with the original key have been released.
+ void releaseVModule(VModuleKey Key);
+
+public:
+ static void logErrorsToStdErr(Error Err);
+
+ SymbolStringPool &SSP;
+ VModuleKey LastKey = 0;
+ ErrorReporter ReportError = logErrorsToStdErr;
+};
+
+/// Runs Materializers on the current thread and reports errors to the given
+/// ExecutionSession.
+class MaterializeOnCurrentThread {
+public:
+ MaterializeOnCurrentThread(ExecutionSession &ES) : ES(ES) {}
+
+ void operator()(VSO &V, std::unique_ptr<MaterializationUnit> MU) {
+ if (auto Err = MU->materialize(V))
+ ES.reportError(std::move(Err));
+ }
+
+private:
+ ExecutionSession &ES;
+};
+
+/// Materialization function object wrapper for the lookup method.
+using MaterializationDispatcher =
+ std::function<void(VSO &V, std::unique_ptr<MaterializationUnit> S)>;
+
+/// @brief Look up a set of symbols by searching a list of VSOs.
+///
+/// All VSOs in the list should be non-null.
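+///
+/// For example (V, Names and ES are assumed to exist; materialization runs on
+/// the calling thread):
+/// \code
+///   auto Result = lookup({&V}, std::move(Names),
+///                        MaterializeOnCurrentThread(ES));
+/// \endcode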
+Expected<SymbolMap> lookup(const std::vector<VSO *> &VSOs, SymbolNameSet Names,
+ MaterializationDispatcher DispatchMaterialization);
+
+/// @brief Look up a symbol by searching a list of VSOs.
+Expected<JITEvaluatedSymbol>
+lookup(const std::vector<VSO *> VSOs, SymbolStringPtr Name,
+ MaterializationDispatcher DispatchMaterialization);
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_CORE_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
new file mode 100644
index 0000000..d466df8
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -0,0 +1,190 @@
+//===- ExecutionUtils.h - Utilities for executing code in Orc ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for executing code in Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <algorithm>
+#include <cstdint>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class ConstantArray;
+class GlobalVariable;
+class Function;
+class Module;
+class Value;
+
+namespace orc {
+
+/// @brief This iterator provides a convenient way to iterate over the elements
+/// of an llvm.global_ctors/llvm.global_dtors instance.
+///
+/// The easiest way to get hold of instances of this class is to use the
+/// getConstructors/getDestructors functions.
+class CtorDtorIterator {
+public:
+ /// @brief Accessor for an element of the global_ctors/global_dtors array.
+ ///
+ /// This class provides a read-only view of the element with any casts on
+ /// the function stripped away.
+ struct Element {
+ Element(unsigned Priority, Function *Func, Value *Data)
+ : Priority(Priority), Func(Func), Data(Data) {}
+
+ unsigned Priority;
+ Function *Func;
+ Value *Data;
+ };
+
+ /// @brief Construct an iterator instance. If End is true then this iterator
+ /// acts as the end of the range, otherwise it is the beginning.
+ CtorDtorIterator(const GlobalVariable *GV, bool End);
+
+ /// @brief Test iterators for equality.
+ bool operator==(const CtorDtorIterator &Other) const;
+
+ /// @brief Test iterators for inequality.
+ bool operator!=(const CtorDtorIterator &Other) const;
+
+ /// @brief Pre-increment iterator.
+ CtorDtorIterator& operator++();
+
+ /// @brief Post-increment iterator.
+ CtorDtorIterator operator++(int);
+
+ /// @brief Dereference iterator. The resulting value provides a read-only view
+ /// of this element of the global_ctors/global_dtors list.
+ Element operator*() const;
+
+private:
+ const ConstantArray *InitList;
+ unsigned I;
+};
+
+/// @brief Create an iterator range over the entries of the llvm.global_ctors
+/// array.
+iterator_range<CtorDtorIterator> getConstructors(const Module &M);
+
+/// @brief Create an iterator range over the entries of the llvm.global_dtors
+/// array.
+iterator_range<CtorDtorIterator> getDestructors(const Module &M);
+
+/// @brief Convenience class for recording constructor/destructor names for
+/// later execution.
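+///
+/// For example, to run a module's constructors once it has been added under
+/// key K (CompileLayer and the mangle helper are assumptions):
+/// \code
+///   std::vector<std::string> CtorNames;
+///   for (auto Ctor : getConstructors(*M))
+///     CtorNames.push_back(mangle(Ctor.Func->getName()));
+///   CtorDtorRunner<decltype(CompileLayer)> CtorRunner(std::move(CtorNames), K);
+///   if (auto Err = CtorRunner.runViaLayer(CompileLayer))
+///     return Err;
+/// \endcode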
+template <typename JITLayerT>
+class CtorDtorRunner {
+public:
+ /// @brief Construct a CtorDtorRunner for the given list of constructor and
+ /// destructor names and module key.
+ CtorDtorRunner(std::vector<std::string> CtorDtorNames, VModuleKey K)
+ : CtorDtorNames(std::move(CtorDtorNames)), K(K) {}
+
+ /// @brief Run the recorded constructors/destructors through the given JIT
+ /// layer.
+ Error runViaLayer(JITLayerT &JITLayer) const {
+ using CtorDtorTy = void (*)();
+
+ for (const auto &CtorDtorName : CtorDtorNames) {
+ if (auto CtorDtorSym = JITLayer.findSymbolIn(K, CtorDtorName, false)) {
+ if (auto AddrOrErr = CtorDtorSym.getAddress()) {
+ CtorDtorTy CtorDtor =
+ reinterpret_cast<CtorDtorTy>(static_cast<uintptr_t>(*AddrOrErr));
+ CtorDtor();
+ } else
+ return AddrOrErr.takeError();
+ } else {
+ if (auto Err = CtorDtorSym.takeError())
+ return Err;
+ else
+ return make_error<JITSymbolNotFound>(CtorDtorName);
+ }
+ }
+ return Error::success();
+ }
+
+private:
+ std::vector<std::string> CtorDtorNames;
+ orc::VModuleKey K;
+};
+
+/// @brief Support class for static dtor execution. For hosted (in-process) JITs
+/// only!
+///
+/// If a __cxa_atexit function isn't found, C++ programs that use static
+/// destructors will fail to link. However, we don't want to use the host
+/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
+/// after the JIT has been torn down, which is no good. This class makes it easy
+/// to override __cxa_atexit (and the related __dso_handle).
+///
+/// To use, clients should manually call searchOverrides from their symbol
+/// resolver. This should generally be done after attempting symbol resolution
+/// inside the JIT, but before searching the host process's symbol table. When
+/// the client determines that destructors should be run (generally at JIT
+/// teardown or after a return from main), the runDestructors method should be
+/// called.
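+///
+/// A hedged usage sketch; Mangle is an assumed client mangling functor and the
+/// resolver snippet is schematic:
+/// \code
+///   LocalCXXRuntimeOverrides CXXRuntimeOverrides(Mangle);
+///
+///   // Inside the client's symbol resolver, before searching the host process:
+///   //   if (auto Sym = CXXRuntimeOverrides.searchOverrides(Name))
+///   //     return Sym;
+///
+///   // Once JIT'd code is finished (e.g. after main returns):
+///   CXXRuntimeOverrides.runDestructors();
+/// \endcode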
+class LocalCXXRuntimeOverrides {
+public:
+ /// Create a runtime-overrides class.
+ template <typename MangleFtorT>
+ LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
+ addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
+ addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
+ }
+
+ /// Search overridden symbols.
+ JITEvaluatedSymbol searchOverrides(const std::string &Name) {
+ auto I = CXXRuntimeOverrides.find(Name);
+ if (I != CXXRuntimeOverrides.end())
+ return JITEvaluatedSymbol(I->second, JITSymbolFlags::Exported);
+ return nullptr;
+ }
+
+ /// Run any destructors recorded by the overridden __cxa_atexit function
+ /// (CXAAtExitOverride).
+ void runDestructors();
+
+private:
+ template <typename PtrTy>
+ JITTargetAddress toTargetAddress(PtrTy* P) {
+ return static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(P));
+ }
+
+ void addOverride(const std::string &Name, JITTargetAddress Addr) {
+ CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
+ }
+
+ StringMap<JITTargetAddress> CXXRuntimeOverrides;
+
+ using DestructorPtr = void (*)(void *);
+ using CXXDestructorDataPair = std::pair<DestructorPtr, void *>;
+ using CXXDestructorDataPairList = std::vector<CXXDestructorDataPair>;
+ CXXDestructorDataPairList DSOHandleOverride;
+ static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
+ void *DSOHandle);
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
new file mode 100644
index 0000000..8a48c36
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -0,0 +1,112 @@
+//===- GlobalMappingLayer.h - Run all IR through a functor ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Convenience layer for injecting symbols that will appear in calls to
+// findSymbol.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include <map>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class Module;
+class JITSymbolResolver;
+
+namespace orc {
+
+/// @brief Global mapping layer.
+///
+/// This layer overrides the findSymbol method to first search a local symbol
+/// table that the client can define. It can be used to inject new symbol
+/// mappings into the JIT. Beware, however: symbols within a single IR module or
+/// object file will still resolve locally (via RuntimeDyld's symbol table) -
+/// such internal references cannot be overridden via this layer.
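+///
+/// For example, to expose an in-process helper function to JIT'd code
+/// (CompileLayer and hostLog are assumptions):
+/// \code
+///   GlobalMappingLayer<decltype(CompileLayer)> MappingLayer(CompileLayer);
+///   MappingLayer.setGlobalMapping(
+///       "host_log",
+///       static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(&hostLog)));
+/// \endcode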
+template <typename BaseLayerT>
+class GlobalMappingLayer {
+public:
+
+ /// @brief Handle to an added module.
+ using ModuleHandleT = typename BaseLayerT::ModuleHandleT;
+
+ /// @brief Construct a GlobalMappingLayer with the given BaseLayer.
+ GlobalMappingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ /// @brief Add the given module to the JIT.
+ /// @return A handle for the added module.
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ Error removeModule(ModuleHandleT H) { return BaseLayer.removeModule(H); }
+
+ /// @brief Manually set the address to return for the given symbol.
+ void setGlobalMapping(const std::string &Name, JITTargetAddress Addr) {
+ SymbolTable[Name] = Addr;
+ }
+
+ /// @brief Remove the given symbol from the global mapping.
+ void eraseGlobalMapping(const std::string &Name) {
+ SymbolTable.erase(Name);
+ }
+
+ /// @brief Search for the given named symbol.
+ ///
+ /// This method will first search the local symbol table, returning
+ /// any symbol found there. If the symbol is not found in the local
+ /// table then this call will be passed through to the base layer.
+ ///
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ auto I = SymbolTable.find(Name);
+ if (I != SymbolTable.end())
+ return JITSymbol(I->second, JITSymbolFlags::Exported);
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the
+ /// module represented by the handle H. This call is forwarded to the
+ /// base layer's implementation.
+ /// @param H The handle for the module to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module.
+ JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ Error emitAndFinalize(ModuleHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
+ }
+
+private:
+ BaseLayerT &BaseLayer;
+ std::map<std::string, JITTargetAddress> SymbolTable;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
new file mode 100644
index 0000000..a7f9416
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -0,0 +1,107 @@
+//===- IRCompileLayer.h -- Eagerly compile IR for JIT -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a basic, eagerly compiling layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class Module;
+
+namespace orc {
+
+/// @brief Eager IR compiling layer.
+///
+/// This layer immediately compiles each IR module added via addModule to an
+/// object file and adds that object file to the layer below, which must
+/// implement the object layer concept.
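+///
+/// Typical construction, sketched under the assumption that ObjLayer models the
+/// object-layer concept and that TM, ES and M are client-owned placeholders:
+/// \code
+///   IRCompileLayer<decltype(ObjLayer), SimpleCompiler> CompileLayer(
+///       ObjLayer, SimpleCompiler(*TM));
+///   cantFail(CompileLayer.addModule(ES.allocateVModule(), std::move(M)));
+/// \endcode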
+template <typename BaseLayerT, typename CompileFtor>
+class IRCompileLayer {
+public:
+ /// @brief Callback type for notifications when modules are compiled.
+ using NotifyCompiledCallback =
+ std::function<void(VModuleKey K, std::unique_ptr<Module>)>;
+
+ /// @brief Construct an IRCompileLayer with the given BaseLayer, which must
+ /// implement the ObjectLayer concept.
+ IRCompileLayer(
+ BaseLayerT &BaseLayer, CompileFtor Compile,
+ NotifyCompiledCallback NotifyCompiled = NotifyCompiledCallback())
+ : BaseLayer(BaseLayer), Compile(std::move(Compile)),
+ NotifyCompiled(std::move(NotifyCompiled)) {}
+
+ /// @brief Get a reference to the compiler functor.
+ CompileFtor& getCompiler() { return Compile; }
+
+ /// @brief (Re)set the NotifyCompiled callback.
+ void setNotifyCompiled(NotifyCompiledCallback NotifyCompiled) {
+ this->NotifyCompiled = std::move(NotifyCompiled);
+ }
+
+ /// @brief Compile the module, and add the resulting object to the base layer
+ /// under the given VModuleKey.
+ Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+ if (auto Err = BaseLayer.addObject(std::move(K), Compile(*M)))
+ return Err;
+ if (NotifyCompiled)
+ NotifyCompiled(std::move(K), std::move(M));
+ return Error::success();
+ }
+
+ /// @brief Remove the module associated with the VModuleKey K.
+ Error removeModule(VModuleKey K) { return BaseLayer.removeObject(K); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the compiled module
+ /// represented by the key K. This call is forwarded to the base layer's
+ /// implementation.
+ /// @param K The VModuleKey for the module to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module.
+ JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module represented by the given
+ /// handle.
+ /// @param K The VModuleKey for the module to emit/finalize.
+ Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+private:
+ BaseLayerT &BaseLayer;
+ CompileFtor Compile;
+ NotifyCompiledCallback NotifyCompiled;
+};
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
new file mode 100644
index 0000000..4f1fe7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -0,0 +1,90 @@
+//===- IRTransformLayer.h - Run all IR through a functor --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all IR passed in through a user supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+class Module;
+namespace orc {
+
+/// @brief IR mutating layer.
+///
+/// This layer applies a user supplied transform to each module that is added,
+/// then adds the transformed module to the layer below.
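+///
+/// For example, an "optimizing" layer over CompileLayer where the transform is
+/// a client-supplied function (both names are assumptions):
+/// \code
+///   using TransformFn =
+///       std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
+///   IRTransformLayer<decltype(CompileLayer), TransformFn> OptimizeLayer(
+///       CompileLayer, [](std::unique_ptr<Module> M) {
+///         // Run whatever passes are desired over *M here.
+///         return M;
+///       });
+/// \endcode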
+template <typename BaseLayerT, typename TransformFtor>
+class IRTransformLayer {
+public:
+
+ /// @brief Construct an IRTransformLayer with the given BaseLayer
+ IRTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
+ : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+ /// @brief Apply the transform functor to the module, then add the transformed
+ /// module to the layer below under the given VModuleKey.
+ Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+ return BaseLayer.addModule(std::move(K), Transform(std::move(M)));
+ }
+
+ /// @brief Remove the module associated with the VModuleKey K.
+ Error removeModule(VModuleKey K) { return BaseLayer.removeModule(K); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the module
+ /// represented by the VModuleKey K. This call is forwarded to the base
+ /// layer's implementation.
+ /// @param K The VModuleKey for the module to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module.
+ JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module represented by the given
+ /// VModuleKey.
+ /// @param K The VModuleKey for the module to emit/finalize.
+ Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+ /// @brief Access the transform functor directly.
+ TransformFtor& getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const overload).
+ const TransformFtor& getTransform() const { return Transform; }
+
+private:
+ BaseLayerT &BaseLayer;
+ TransformFtor Transform;
+};
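+
+// A minimal usage sketch. "CompileLayer", "ES" (an ExecutionSession) and "M"
+// (a std::unique_ptr<Module>) are assumed to exist elsewhere; the verifier
+// transform is just one possible functor.
+//
+//   auto Transform = [](std::unique_ptr<Module> M) {
+//     if (verifyModule(*M, &errs()))
+//       report_fatal_error("Module failed verification");
+//     return M;
+//   };
+//   IRTransformLayer<decltype(CompileLayer), decltype(Transform)>
+//       TransformLayer(CompileLayer, std::move(Transform));
+//   cantFail(TransformLayer.addModule(ES.allocateVModule(), std::move(M)));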
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
new file mode 100644
index 0000000..029b86a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -0,0 +1,451 @@
+//===- IndirectionUtils.h - Utilities for adding indirections ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for adding indirections and breaking up modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class Function;
+class FunctionType;
+class GlobalAlias;
+class GlobalVariable;
+class Module;
+class PointerType;
+class Triple;
+class Value;
+
+namespace orc {
+
+/// @brief Target-independent base class for compile callback management.
+class JITCompileCallbackManager {
+public:
+ using CompileFtor = std::function<JITTargetAddress()>;
+
+ /// @brief Handle to a newly created compile callback. Can be used to get an
+ /// IR constant representing the address of the trampoline, and to set
+ /// the compile action for the callback.
+ class CompileCallbackInfo {
+ public:
+ CompileCallbackInfo(JITTargetAddress Addr, CompileFtor &Compile)
+ : Addr(Addr), Compile(Compile) {}
+
+ JITTargetAddress getAddress() const { return Addr; }
+ void setCompileAction(CompileFtor Compile) {
+ this->Compile = std::move(Compile);
+ }
+
+ private:
+ JITTargetAddress Addr;
+ CompileFtor &Compile;
+ };
+
+ /// @brief Construct a JITCompileCallbackManager.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ JITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
+ : ErrorHandlerAddress(ErrorHandlerAddress) {}
+
+ virtual ~JITCompileCallbackManager() = default;
+
+ /// @brief Execute the callback for the given trampoline id. Called by the JIT
+ /// to compile functions on demand.
+ JITTargetAddress executeCompileCallback(JITTargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ // FIXME: Also raise an error in the Orc error-handler when we finally have
+ // one.
+ if (I == ActiveTrampolines.end())
+ return ErrorHandlerAddress;
+
+ // Found a callback handler. Yank this trampoline out of the active list and
+ // put it back in the available trampolines list, then try to run the
+ // handler's compile and update actions.
+    // Moving the trampoline ID back to the available list first means there's
+    // at least one available trampoline if the compile action triggers a
+    // request for a new one.
+ auto Compile = std::move(I->second);
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+
+ if (auto Addr = Compile())
+ return Addr;
+
+ return ErrorHandlerAddress;
+ }
+
+ /// @brief Reserve a compile callback.
+ Expected<CompileCallbackInfo> getCompileCallback() {
+ if (auto TrampolineAddrOrErr = getAvailableTrampolineAddr()) {
+ const auto &TrampolineAddr = *TrampolineAddrOrErr;
+ auto &Compile = this->ActiveTrampolines[TrampolineAddr];
+ return CompileCallbackInfo(TrampolineAddr, Compile);
+ } else
+ return TrampolineAddrOrErr.takeError();
+ }
+
+ /// @brief Get a CompileCallbackInfo for an existing callback.
+ CompileCallbackInfo getCompileCallbackInfo(JITTargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ return CompileCallbackInfo(I->first, I->second);
+ }
+
+ /// @brief Release a compile callback.
+ ///
+ /// Note: Callbacks are auto-released after they execute. This method should
+ /// only be called to manually release a callback that is not going to
+ /// execute.
+ void releaseCompileCallback(JITTargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+ }
+
+protected:
+ JITTargetAddress ErrorHandlerAddress;
+
+ using TrampolineMapT = std::map<JITTargetAddress, CompileFtor>;
+ TrampolineMapT ActiveTrampolines;
+ std::vector<JITTargetAddress> AvailableTrampolines;
+
+private:
+ Expected<JITTargetAddress> getAvailableTrampolineAddr() {
+ if (this->AvailableTrampolines.empty())
+ if (auto Err = grow())
+ return std::move(Err);
+ assert(!this->AvailableTrampolines.empty() &&
+ "Failed to grow available trampolines.");
+ JITTargetAddress TrampolineAddr = this->AvailableTrampolines.back();
+ this->AvailableTrampolines.pop_back();
+ return TrampolineAddr;
+ }
+
+ // Create new trampolines - to be implemented in subclasses.
+ virtual Error grow() = 0;
+
+ virtual void anchor();
+};
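+
+// A minimal usage sketch for reserving a callback. "CCMgr" is assumed to be a
+// concrete JITCompileCallbackManager (e.g. a LocalJITCompileCallbackManager)
+// and "compileBody" is a placeholder for the client's own compile logic.
+//
+//   if (auto CCInfoOrErr = CCMgr.getCompileCallback()) {
+//     auto &CCInfo = *CCInfoOrErr;
+//     CCInfo.setCompileAction([]() -> JITTargetAddress {
+//       // Compile the function here and return its address; the trampoline
+//       // will jump to whatever address this returns.
+//       return compileBody();
+//     });
+//     // Point a stub or call-site at the trampoline; the compile action runs
+//     // the first time it is hit.
+//     JITTargetAddress TrampolineAddr = CCInfo.getAddress();
+//   } else
+//     return CCInfoOrErr.takeError(); // Assumes an Error-returning context.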
+
+/// @brief Manage compile callbacks for in-process JITs.
+template <typename TargetT>
+class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
+public:
+  /// @brief Construct a LocalJITCompileCallbackManager.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ LocalJITCompileCallbackManager(JITTargetAddress ErrorHandlerAddress)
+ : JITCompileCallbackManager(ErrorHandlerAddress) {
+ /// Set up the resolver block.
+ std::error_code EC;
+ ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ TargetT::ResolverCodeSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+ assert(!EC && "Failed to allocate resolver block");
+
+ TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+ &reenter, this);
+
+ EC = sys::Memory::protectMappedMemory(ResolverBlock.getMemoryBlock(),
+ sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ assert(!EC && "Failed to mprotect resolver block");
+ }
+
+private:
+ static JITTargetAddress reenter(void *CCMgr, void *TrampolineId) {
+ JITCompileCallbackManager *Mgr =
+ static_cast<JITCompileCallbackManager *>(CCMgr);
+ return Mgr->executeCompileCallback(
+ static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineId)));
+ }
+
+ Error grow() override {
+ assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
+
+ std::error_code EC;
+ auto TrampolineBlock =
+ sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ sys::Process::getPageSize(), nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+ if (EC)
+ return errorCodeToError(EC);
+
+ unsigned NumTrampolines =
+ (sys::Process::getPageSize() - TargetT::PointerSize) /
+ TargetT::TrampolineSize;
+
+ uint8_t *TrampolineMem = static_cast<uint8_t *>(TrampolineBlock.base());
+ TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+ NumTrampolines);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I)
+ this->AvailableTrampolines.push_back(
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(
+ TrampolineMem + (I * TargetT::TrampolineSize))));
+
+ if (auto EC = sys::Memory::protectMappedMemory(
+ TrampolineBlock.getMemoryBlock(),
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return errorCodeToError(EC);
+
+ TrampolineBlocks.push_back(std::move(TrampolineBlock));
+ return Error::success();
+ }
+
+ sys::OwningMemoryBlock ResolverBlock;
+ std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+};
+
+/// @brief Base class for managing collections of named indirect stubs.
+class IndirectStubsManager {
+public:
+ /// @brief Map type for initializing the manager. See init.
+ using StubInitsMap = StringMap<std::pair<JITTargetAddress, JITSymbolFlags>>;
+
+ virtual ~IndirectStubsManager() = default;
+
+ /// @brief Create a single stub with the given name, target address and flags.
+ virtual Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+ JITSymbolFlags StubFlags) = 0;
+
+ /// @brief Create StubInits.size() stubs with the given names, target
+ /// addresses, and flags.
+ virtual Error createStubs(const StubInitsMap &StubInits) = 0;
+
+ /// @brief Find the stub with the given name. If ExportedStubsOnly is true,
+ /// this will only return a result if the stub's flags indicate that it
+ /// is exported.
+ virtual JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) = 0;
+
+ /// @brief Find the implementation-pointer for the stub.
+ virtual JITSymbol findPointer(StringRef Name) = 0;
+
+ /// @brief Change the value of the implementation pointer for the stub.
+ virtual Error updatePointer(StringRef Name, JITTargetAddress NewAddr) = 0;
+
+private:
+ virtual void anchor();
+};
+
+/// @brief IndirectStubsManager implementation for the host architecture, e.g.
+///        OrcX86_64_SysV. (See OrcABISupport.h).
+template <typename TargetT>
+class LocalIndirectStubsManager : public IndirectStubsManager {
+public:
+ Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+ JITSymbolFlags StubFlags) override {
+ if (auto Err = reserveStubs(1))
+ return Err;
+
+ createStubInternal(StubName, StubAddr, StubFlags);
+
+ return Error::success();
+ }
+
+ Error createStubs(const StubInitsMap &StubInits) override {
+ if (auto Err = reserveStubs(StubInits.size()))
+ return Err;
+
+ for (auto &Entry : StubInits)
+ createStubInternal(Entry.first(), Entry.second.first,
+ Entry.second.second);
+
+ return Error::success();
+ }
+
+ JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ void *StubAddr = IndirectStubsInfos[Key.first].getStub(Key.second);
+ assert(StubAddr && "Missing stub address");
+ auto StubTargetAddr =
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(StubAddr));
+ auto StubSymbol = JITSymbol(StubTargetAddr, I->second.second);
+ if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
+ return nullptr;
+ return StubSymbol;
+ }
+
+ JITSymbol findPointer(StringRef Name) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ void *PtrAddr = IndirectStubsInfos[Key.first].getPtr(Key.second);
+ assert(PtrAddr && "Missing pointer address");
+ auto PtrTargetAddr =
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(PtrAddr));
+ return JITSymbol(PtrTargetAddr, I->second.second);
+ }
+
+ Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
+ auto I = StubIndexes.find(Name);
+ assert(I != StubIndexes.end() && "No stub pointer for symbol");
+ auto Key = I->second.first;
+ *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+ reinterpret_cast<void *>(static_cast<uintptr_t>(NewAddr));
+ return Error::success();
+ }
+
+private:
+ Error reserveStubs(unsigned NumStubs) {
+ if (NumStubs <= FreeStubs.size())
+ return Error::success();
+
+ unsigned NewStubsRequired = NumStubs - FreeStubs.size();
+ unsigned NewBlockId = IndirectStubsInfos.size();
+ typename TargetT::IndirectStubsInfo ISI;
+ if (auto Err =
+ TargetT::emitIndirectStubsBlock(ISI, NewStubsRequired, nullptr))
+ return Err;
+ for (unsigned I = 0; I < ISI.getNumStubs(); ++I)
+ FreeStubs.push_back(std::make_pair(NewBlockId, I));
+ IndirectStubsInfos.push_back(std::move(ISI));
+ return Error::success();
+ }
+
+ void createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
+ JITSymbolFlags StubFlags) {
+ auto Key = FreeStubs.back();
+ FreeStubs.pop_back();
+ *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+ reinterpret_cast<void *>(static_cast<uintptr_t>(InitAddr));
+ StubIndexes[StubName] = std::make_pair(Key, StubFlags);
+ }
+
+ std::vector<typename TargetT::IndirectStubsInfo> IndirectStubsInfos;
+ using StubKey = std::pair<uint16_t, uint16_t>;
+ std::vector<StubKey> FreeStubs;
+ StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
+};
+
+/// @brief Create a local compile callback manager.
+///
+/// The given target triple will determine the ABI, and the given
+/// ErrorHandlerAddress will be used by the resulting compile callback
+/// manager if a compile callback fails.
+std::unique_ptr<JITCompileCallbackManager>
+createLocalCompileCallbackManager(const Triple &T,
+ JITTargetAddress ErrorHandlerAddress);
+
+/// @brief Create a local indirect stubs manager builder.
+///
+/// The given target triple will determine the ABI.
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T);
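+
+// A combined sketch for the two factory functions above; the triple string and
+// the error-handler address of 0 are illustrative assumptions.
+//
+//   Triple T("x86_64-unknown-linux-gnu");
+//   auto CCMgr =
+//       createLocalCompileCallbackManager(T, /*ErrorHandlerAddress=*/0);
+//   auto ISMBuilder = createLocalIndirectStubsManagerBuilder(T);
+//   auto ISM = ISMBuilder(); // Typically one stubs manager per logical dylib.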
+
+/// @brief Build a function pointer of FunctionType with the given constant
+/// address.
+///
+/// Usage example: Turn a trampoline address into a function pointer constant
+/// for use in a stub.
+Constant *createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr);
+
+/// @brief Create a function pointer with the given type, name, and initializer
+/// in the given Module.
+GlobalVariable *createImplPointer(PointerType &PT, Module &M, const Twine &Name,
+ Constant *Initializer);
+
+/// @brief Turn a function declaration into a stub function that makes an
+/// indirect call using the given function pointer.
+void makeStub(Function &F, Value &ImplPointer);
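+
+// Sketch of the typical flow through the three helpers above. "F" is an
+// existing function declaration and "TrampolineAddr" a compile-callback
+// address obtained elsewhere; the "$stub_ptr" suffix is just an example name.
+//
+//   Constant *AddrC = createIRTypedAddress(*F.getFunctionType(),
+//                                          TrampolineAddr);
+//   GlobalVariable *ImplPtr = createImplPointer(
+//       *F.getFunctionType()->getPointerTo(), *F.getParent(),
+//       F.getName().str() + "$stub_ptr", AddrC);
+//   makeStub(F, *ImplPtr); // F's body now calls indirectly through ImplPtr.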
+
+/// @brief Raise linkage types and rename as necessary to ensure that all
+/// symbols are accessible for other modules.
+///
+/// This should be called before partitioning a module to ensure that the
+/// partitions retain access to each other's symbols.
+void makeAllSymbolsExternallyAccessible(Module &M);
+
+/// @brief Clone a function declaration into a new module.
+///
+/// This function can be used as the first step towards creating a callback
+/// stub (see makeStub), or moving a function body (see moveFunctionBody).
+///
+/// If the VMap argument is non-null, a mapping will be added between F and
+/// the new declaration, and between each of F's arguments and the new
+/// declaration's arguments. This map can then be passed in to moveFunction to
+/// move the function body if required. Note: When moving functions between
+/// modules with these utilities, all decls should be cloned (and added to a
+/// single VMap) before any bodies are moved. This will ensure that references
+/// between functions all refer to the versions in the new module.
+Function *cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move the body of function 'F' to a cloned function declaration in a
+/// different module (See related cloneFunctionDecl).
+///
+/// If the target function declaration is not supplied via the NewF parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the body of function 'F' from its original parent module,
+/// but leave its declaration.
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ Function *NewF = nullptr);
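+
+// Sketch of moving a function between modules with the two helpers above.
+// "SrcF" is a function in the source module and "DstM" the destination
+// module; both are assumed to exist elsewhere.
+//
+//   ValueToValueMapTy VMap;
+//   // Clone all required declarations into DstM first so that references
+//   // between moved bodies resolve within the new module.
+//   Function *NewF = cloneFunctionDecl(DstM, SrcF, &VMap);
+//   // Then move the body across; SrcF is left as a declaration.
+//   moveFunctionBody(SrcF, VMap, /*Materializer=*/nullptr, NewF);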
+
+/// @brief Clone a global variable declaration into a new module.
+GlobalVariable *cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move global variable GV from its parent module to cloned global
+/// declaration in a different module.
+///
+/// If the target global declaration is not supplied via the NewGV parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the initializer of GV from its original parent module,
+/// but leave its declaration.
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ GlobalVariable *NewGV = nullptr);
+
+/// @brief Clone a global alias declaration into a new module.
+GlobalAlias *cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap);
+
+/// @brief Clone module flags metadata into the destination module.
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap);
+
+} // end namespace orc
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
new file mode 100644
index 0000000..7b6f3d2
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -0,0 +1,59 @@
+//===- LambdaResolver.h - Redirect symbol lookup via a functor --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a LegacyJITSymbolResolver subclass that uses a user-supplied
+// functor for symbol resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+template <typename DylibLookupFtorT, typename ExternalLookupFtorT>
+class LambdaResolver : public LegacyJITSymbolResolver {
+public:
+ LambdaResolver(DylibLookupFtorT DylibLookupFtor,
+ ExternalLookupFtorT ExternalLookupFtor)
+ : DylibLookupFtor(DylibLookupFtor),
+ ExternalLookupFtor(ExternalLookupFtor) {}
+
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) final {
+ return DylibLookupFtor(Name);
+ }
+
+ JITSymbol findSymbol(const std::string &Name) final {
+ return ExternalLookupFtor(Name);
+ }
+
+private:
+ DylibLookupFtorT DylibLookupFtor;
+ ExternalLookupFtorT ExternalLookupFtor;
+};
+
+template <typename DylibLookupFtorT,
+ typename ExternalLookupFtorT>
+std::shared_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
+createLambdaResolver(DylibLookupFtorT DylibLookupFtor,
+ ExternalLookupFtorT ExternalLookupFtor) {
+ using LR = LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>;
+ return make_unique<LR>(std::move(DylibLookupFtor),
+ std::move(ExternalLookupFtor));
+}
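+
+// A minimal usage sketch. "CompileLayer" is an assumed JIT layer providing
+// findSymbol; the fallback lambda resolves against the host process's own
+// symbols.
+//
+//   auto Resolver = createLambdaResolver(
+//       // First look in code already added to the JIT...
+//       [&](const std::string &Name) {
+//         return CompileLayer.findSymbol(Name, /*ExportedSymbolsOnly=*/false);
+//       },
+//       // ...then fall back to symbols in the host process.
+//       [](const std::string &Name) -> JITSymbol {
+//         if (auto Addr =
+//                 RTDyldMemoryManager::getSymbolAddressInProcess(Name))
+//           return JITSymbol(Addr, JITSymbolFlags::Exported);
+//         return nullptr;
+//       });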
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
new file mode 100644
index 0000000..4117a92
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -0,0 +1,261 @@
+//===- LazyEmittingLayer.h - Lazily emit IR to lower JIT layers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a lazy-emitting layer for the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <list>
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Lazy-emitting IR layer.
+///
+/// This layer accepts LLVM IR Modules (via addModule), but does not
+/// immediately emit them to the layer below. Instead, emission to the base
+/// layer is deferred until the first time the client requests the address (via
+/// JITSymbol::getAddress) for a symbol contained in this layer.
+template <typename BaseLayerT> class LazyEmittingLayer {
+private:
+ class EmissionDeferredModule {
+ public:
+ EmissionDeferredModule(VModuleKey K, std::unique_ptr<Module> M)
+ : K(std::move(K)), M(std::move(M)) {}
+
+ JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
+ switch (EmitState) {
+ case NotEmitted:
+ if (auto GV = searchGVs(Name, ExportedSymbolsOnly)) {
+ // Create a std::string version of Name to capture here - the argument
+ // (a StringRef) may go away before the lambda is executed.
+ // FIXME: Use capture-init when we move to C++14.
+ std::string PName = Name;
+ JITSymbolFlags Flags = JITSymbolFlags::fromGlobalValue(*GV);
+ auto GetAddress =
+ [this, ExportedSymbolsOnly, PName, &B]() -> Expected<JITTargetAddress> {
+ if (this->EmitState == Emitting)
+ return 0;
+ else if (this->EmitState == NotEmitted) {
+ this->EmitState = Emitting;
+ if (auto Err = this->emitToBaseLayer(B))
+ return std::move(Err);
+ this->EmitState = Emitted;
+ }
+ if (auto Sym = B.findSymbolIn(K, PName, ExportedSymbolsOnly))
+ return Sym.getAddress();
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Successful symbol lookup should return "
+ "definition address here");
+ };
+ return JITSymbol(std::move(GetAddress), Flags);
+ } else
+ return nullptr;
+ case Emitting:
+ // Calling "emit" can trigger a recursive call to 'find' (e.g. to check
+      // for pre-existing definitions of common symbols), but any symbol in
+ // this module would already have been found internally (in the
+ // RuntimeDyld that did the lookup), so just return a nullptr here.
+ return nullptr;
+ case Emitted:
+ return B.findSymbolIn(K, Name, ExportedSymbolsOnly);
+ }
+ llvm_unreachable("Invalid emit-state.");
+ }
+
+ Error removeModuleFromBaseLayer(BaseLayerT& BaseLayer) {
+ return EmitState != NotEmitted ? BaseLayer.removeModule(K)
+ : Error::success();
+ }
+
+ void emitAndFinalize(BaseLayerT &BaseLayer) {
+ assert(EmitState != Emitting &&
+ "Cannot emitAndFinalize while already emitting");
+ if (EmitState == NotEmitted) {
+ EmitState = Emitting;
+ emitToBaseLayer(BaseLayer);
+ EmitState = Emitted;
+ }
+ BaseLayer.emitAndFinalize(K);
+ }
+
+ private:
+
+ const GlobalValue* searchGVs(StringRef Name,
+ bool ExportedSymbolsOnly) const {
+ // FIXME: We could clean all this up if we had a way to reliably demangle
+ // names: We could just demangle name and search, rather than
+ // mangling everything else.
+
+ // If we have already built the mangled name set then just search it.
+ if (MangledSymbols) {
+ auto VI = MangledSymbols->find(Name);
+ if (VI == MangledSymbols->end())
+ return nullptr;
+ auto GV = VI->second;
+ if (!ExportedSymbolsOnly || GV->hasDefaultVisibility())
+ return GV;
+ return nullptr;
+ }
+
+ // If we haven't built the mangled name set yet, try to build it. As an
+      // optimization this will leave MangledSymbols set to nullptr if we find
+ // Name in the process of building the set.
+ return buildMangledSymbols(Name, ExportedSymbolsOnly);
+ }
+
+ Error emitToBaseLayer(BaseLayerT &BaseLayer) {
+ // We don't need the mangled names set any more: Once we've emitted this
+ // to the base layer we'll just look for symbols there.
+ MangledSymbols.reset();
+ return BaseLayer.addModule(std::move(K), std::move(M));
+ }
+
+ // If the mangled name of the given GlobalValue matches the given search
+ // name (and its visibility conforms to the ExportedSymbolsOnly flag) then
+ // return the symbol. Otherwise, add the mangled name to the Names map and
+ // return nullptr.
+ const GlobalValue* addGlobalValue(StringMap<const GlobalValue*> &Names,
+ const GlobalValue &GV,
+ const Mangler &Mang, StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ // Modules don't "provide" decls or common symbols.
+ if (GV.isDeclaration() || GV.hasCommonLinkage())
+ return nullptr;
+
+ // Mangle the GV name.
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mang.getNameWithPrefix(MangledNameStream, &GV, false);
+ }
+
+ // Check whether this is the name we were searching for, and if it is then
+ // bail out early.
+ if (MangledName == SearchName)
+ if (!ExportedSymbolsOnly || GV.hasDefaultVisibility())
+ return &GV;
+
+ // Otherwise add this to the map for later.
+ Names[MangledName] = &GV;
+ return nullptr;
+ }
+
+ // Build the MangledSymbols map. Bails out early (with MangledSymbols left set
+ // to nullptr) if the given SearchName is found while building the map.
+ const GlobalValue* buildMangledSymbols(StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ assert(!MangledSymbols && "Mangled symbols map already exists?");
+
+ auto Symbols = llvm::make_unique<StringMap<const GlobalValue*>>();
+
+ Mangler Mang;
+
+ for (const auto &GO : M->global_objects())
+ if (auto GV = addGlobalValue(*Symbols, GO, Mang, SearchName,
+ ExportedSymbolsOnly))
+ return GV;
+
+ MangledSymbols = std::move(Symbols);
+ return nullptr;
+ }
+
+ enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
+ VModuleKey K;
+ std::unique_ptr<Module> M;
+ mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
+ };
+
+ BaseLayerT &BaseLayer;
+ std::map<VModuleKey, std::unique_ptr<EmissionDeferredModule>> ModuleMap;
+
+public:
+
+ /// @brief Construct a lazy emitting layer.
+ LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ /// @brief Add the given module to the lazy emitting layer.
+ Error addModule(VModuleKey K, std::unique_ptr<Module> M) {
+ assert(!ModuleMap.count(K) && "VModuleKey K already in use");
+ ModuleMap[K] =
+ llvm::make_unique<EmissionDeferredModule>(std::move(K), std::move(M));
+ return Error::success();
+ }
+
+ /// @brief Remove the module represented by the given handle.
+ ///
+ /// This method will free the memory associated with the given module, both
+ /// in this layer, and the base layer.
+ Error removeModule(VModuleKey K) {
+ auto I = ModuleMap.find(K);
+ assert(I != ModuleMap.end() && "VModuleKey K not valid here");
+    auto EDM = std::move(I->second);
+ ModuleMap.erase(I);
+ return EDM->removeModuleFromBaseLayer(BaseLayer);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ // Look for the symbol among existing definitions.
+ if (auto Symbol = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+ return Symbol;
+
+ // If not found then search the deferred modules. If any of these contain a
+ // definition of 'Name' then they will return a JITSymbol that will emit
+ // the corresponding module when the symbol address is requested.
+ for (auto &KV : ModuleMap)
+ if (auto Symbol = KV.second->find(Name, ExportedSymbolsOnly, BaseLayer))
+ return Symbol;
+
+ // If no definition found anywhere return a null symbol.
+ return nullptr;
+ }
+
+  /// @brief Get the address of the given symbol in the context of the module
+  ///        represented by the key K.
+ JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ assert(ModuleMap.count(K) && "VModuleKey K not valid here");
+ return ModuleMap[K]->find(Name, ExportedSymbolsOnly, BaseLayer);
+ }
+
+ /// @brief Immediately emit and finalize the module represented by the given
+ /// key.
+ Error emitAndFinalize(VModuleKey K) {
+ assert(ModuleMap.count(K) && "VModuleKey K not valid here");
+ return ModuleMap[K]->emitAndFinalize(BaseLayer);
+ }
+};
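+
+// A minimal usage sketch. "CompileLayer", "ES" (an ExecutionSession) and "M"
+// (a std::unique_ptr<Module> defining "main") are assumed to exist elsewhere.
+//
+//   LazyEmittingLayer<decltype(CompileLayer)> LazyLayer(CompileLayer);
+//   VModuleKey K = ES.allocateVModule();
+//   cantFail(LazyLayer.addModule(K, std::move(M)));
+//   // Nothing has been emitted yet; the base layer compiles the module the
+//   // first time getAddress() is called on a symbol it defines.
+//   if (auto Sym = LazyLayer.findSymbol("main", /*ExportedSymbolsOnly=*/true)) {
+//     JITTargetAddress MainAddr = cantFail(Sym.getAddress());
+//     // ... call or patch MainAddr ...
+//   }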
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h
new file mode 100644
index 0000000..b2b389a
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/Legacy.h
@@ -0,0 +1,138 @@
+//===--- Legacy.h -- Adapters for ExecutionEngine API interop ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains adapters for interoperability between legacy JIT symbol resolvers
+// and the core ORC APIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LEGACY_H
+#define LLVM_EXECUTIONENGINE_ORC_LEGACY_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+namespace llvm {
+namespace orc {
+
+class JITSymbolResolverAdapter : public JITSymbolResolver {
+public:
+ JITSymbolResolverAdapter(ExecutionSession &ES, SymbolResolver &R);
+ Expected<LookupFlagsResult> lookupFlags(const LookupSet &Symbols) override;
+ Expected<LookupResult> lookup(const LookupSet &Symbols) override;
+
+private:
+ ExecutionSession &ES;
+ std::set<SymbolStringPtr> ResolvedStrings;
+ SymbolResolver &R;
+};
+
+/// @brief Use the given legacy-style FindSymbol function (i.e. a function that
+/// takes a const std::string& or StringRef and returns a JITSymbol) to
+/// find the flags for each symbol in Symbols and store their flags in
+/// SymbolFlags. If any JITSymbol returned by FindSymbol is in an error
+/// state the function returns immediately with that error, otherwise it
+/// returns the set of symbols not found.
+///
+/// Useful for implementing lookupFlags bodies that query legacy resolvers.
+template <typename FindSymbolFn>
+Expected<SymbolNameSet> lookupFlagsWithLegacyFn(SymbolFlagsMap &SymbolFlags,
+ const SymbolNameSet &Symbols,
+ FindSymbolFn FindSymbol) {
+ SymbolNameSet SymbolsNotFound;
+
+ for (auto &S : Symbols) {
+ if (JITSymbol Sym = FindSymbol(*S))
+ SymbolFlags[S] = Sym.getFlags();
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else
+ SymbolsNotFound.insert(S);
+ }
+
+ return SymbolsNotFound;
+}
+
+/// @brief Use the given legacy-style FindSymbol function (i.e. a function that
+/// takes a const std::string& or StringRef and returns a JITSymbol) to
+/// find the address and flags for each symbol in Symbols and store the
+/// result in Query. If any JITSymbol returned by FindSymbol is in an
+///        error state then Query.setFailed(...) is called with that error and
+///        the function returns immediately. On success, returns the set of
+///        symbols not found.
+///
+/// Useful for implementing lookup bodies that query legacy resolvers.
+template <typename FindSymbolFn>
+SymbolNameSet lookupWithLegacyFn(AsynchronousSymbolQuery &Query,
+ const SymbolNameSet &Symbols,
+ FindSymbolFn FindSymbol) {
+ SymbolNameSet SymbolsNotFound;
+
+ for (auto &S : Symbols) {
+ if (JITSymbol Sym = FindSymbol(*S)) {
+ if (auto Addr = Sym.getAddress()) {
+ Query.setDefinition(S, JITEvaluatedSymbol(*Addr, Sym.getFlags()));
+ Query.notifySymbolFinalized();
+ } else {
+ Query.setFailed(Addr.takeError());
+ return SymbolNameSet();
+ }
+ } else if (auto Err = Sym.takeError()) {
+ Query.setFailed(std::move(Err));
+ return SymbolNameSet();
+ } else
+ SymbolsNotFound.insert(S);
+ }
+
+ return SymbolsNotFound;
+}
+
+/// @brief An ORC SymbolResolver implementation that uses a legacy
+///        findSymbol-like function to perform lookup.
+template <typename LegacyLookupFn>
+class LegacyLookupFnResolver final : public SymbolResolver {
+public:
+ using ErrorReporter = std::function<void(Error)>;
+
+ LegacyLookupFnResolver(LegacyLookupFn LegacyLookup, ErrorReporter ReportError)
+ : LegacyLookup(std::move(LegacyLookup)),
+ ReportError(std::move(ReportError)) {}
+
+ SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+ const SymbolNameSet &Symbols) final {
+ if (auto RemainingSymbols =
+ lookupFlagsWithLegacyFn(Flags, Symbols, LegacyLookup))
+ return std::move(*RemainingSymbols);
+ else {
+ ReportError(RemainingSymbols.takeError());
+ return Symbols;
+ }
+ }
+
+ SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) final {
+ return lookupWithLegacyFn(*Query, Symbols, LegacyLookup);
+ }
+
+private:
+ LegacyLookupFn LegacyLookup;
+ ErrorReporter ReportError;
+};
+
+template <typename LegacyLookupFn>
+std::shared_ptr<LegacyLookupFnResolver<LegacyLookupFn>>
+createLegacyLookupResolver(LegacyLookupFn LegacyLookup,
+ std::function<void(Error)> ErrorReporter) {
+ return std::make_shared<LegacyLookupFnResolver<LegacyLookupFn>>(
+ std::move(LegacyLookup), std::move(ErrorReporter));
+}
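+
+// A minimal usage sketch. "ObjLayer" is an assumed object-linking layer; the
+// error reporter simply logs to stderr.
+//
+//   auto Resolver = createLegacyLookupResolver(
+//       [&](const std::string &Name) -> JITSymbol {
+//         return ObjLayer.findSymbol(Name, /*ExportedSymbolsOnly=*/false);
+//       },
+//       [](Error Err) {
+//         logAllUnhandledErrors(std::move(Err), errs(), "Lookup error: ");
+//       });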
+
+} // End namespace orc
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LEGACY_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h
new file mode 100644
index 0000000..eba9b95
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/NullResolver.h
@@ -0,0 +1,45 @@
+//===------ NullResolver.h - Reject symbol lookup requests ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines SymbolResolver and LegacyJITSymbolResolver implementations that
+// reject all symbol resolution requests, for clients that have no
+// cross-object fixups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+
+namespace llvm {
+namespace orc {
+
+class NullResolver : public SymbolResolver {
+public:
+ SymbolNameSet lookupFlags(SymbolFlagsMap &Flags,
+ const SymbolNameSet &Symbols) override;
+
+ SymbolNameSet lookup(std::shared_ptr<AsynchronousSymbolQuery> Query,
+ SymbolNameSet Symbols) override;
+};
+
+/// LegacyJITSymbolResolver implementation that rejects all resolution
+/// requests. Useful for clients that have no cross-object fixups.
+class NullLegacyResolver : public LegacyJITSymbolResolver {
+public:
+ JITSymbol findSymbol(const std::string &Name) final;
+
+ JITSymbol findSymbolInLogicalDylib(const std::string &Name) final;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
new file mode 100644
index 0000000..cfc3922
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -0,0 +1,97 @@
+//===- ObjectTransformLayer.h - Run all objects through functor -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all objects passed in through a user supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include <algorithm>
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Object mutating layer.
+///
+/// This layer accepts objects (via addObject), immediately applies the user
+/// supplied functor to each one, then adds the transformed object to the
+/// layer below.
+template <typename BaseLayerT, typename TransformFtor>
+class ObjectTransformLayer {
+public:
+ /// @brief Construct an ObjectTransformLayer with the given BaseLayer
+ ObjectTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
+ : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+  /// @brief Apply the transform functor to the object, then add the
+  ///        transformed object to the base layer.
+  ///
+  /// @return Any error returned by the base layer when adding the object.
+ template <typename ObjectPtr> Error addObject(VModuleKey K, ObjectPtr Obj) {
+ return BaseLayer.addObject(std::move(K), Transform(std::move(Obj)));
+ }
+
+ /// @brief Remove the object set associated with the VModuleKey K.
+ Error removeObject(VModuleKey K) { return BaseLayer.removeObject(K); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// objects represented by the VModuleKey K. This call is forwarded to
+ /// the base layer's implementation.
+ /// @param K The VModuleKey associated with the object set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given object set.
+ JITSymbol findSymbolIn(VModuleKey K, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(K, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the object set represented by the
+ /// given VModuleKey K.
+ Error emitAndFinalize(VModuleKey K) { return BaseLayer.emitAndFinalize(K); }
+
+ /// @brief Map section addresses for the objects associated with the
+ /// VModuleKey K.
+ void mapSectionAddress(VModuleKey K, const void *LocalAddress,
+ JITTargetAddress TargetAddr) {
+ BaseLayer.mapSectionAddress(K, LocalAddress, TargetAddr);
+ }
+
+ /// @brief Access the transform functor directly.
+ TransformFtor &getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const overload).
+ const TransformFtor &getTransform() const { return Transform; }
+
+private:
+ BaseLayerT &BaseLayer;
+ TransformFtor Transform;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
new file mode 100644
index 0000000..e1b5564
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcABISupport.h
@@ -0,0 +1,244 @@
+//===- OrcABISupport.h - ABI support code -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ABI specific code for Orc, e.g. callback assembly.
+//
+// ABI classes should be part of the JIT *target* process, not the host
+// process (except where you're doing hosted JITing and the two are one and the
+// same).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Memory.h"
+#include <algorithm>
+#include <cstdint>
+
+namespace llvm {
+namespace orc {
+
+/// Generic ORC ABI support.
+///
+/// This class can be substituted as the target architecture support class for
+/// ORC templates that require one (e.g. IndirectStubsManagers). It does not,
+/// however, support lazy JITing, and any attempt to use that functionality
+/// will result in execution of an llvm_unreachable.
+class OrcGenericABI {
+public:
+ static const unsigned PointerSize = sizeof(uintptr_t);
+ static const unsigned TrampolineSize = 1;
+ static const unsigned ResolverCodeSize = 1;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr) {
+ llvm_unreachable("writeResolverCode is not supported by the generic host "
+ "support class");
+ }
+
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines) {
+ llvm_unreachable("writeTrampolines is not supported by the generic host "
+ "support class");
+ }
+
+ class IndirectStubsInfo {
+ public:
+ const static unsigned StubSize = 1;
+
+ unsigned getNumStubs() const { llvm_unreachable("Not supported"); }
+ void *getStub(unsigned Idx) const { llvm_unreachable("Not supported"); }
+ void **getPtr(unsigned Idx) const { llvm_unreachable("Not supported"); }
+ };
+
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal) {
+ llvm_unreachable("emitIndirectStubsBlock is not supported by the generic "
+ "host support class");
+ }
+};
+
+/// @brief Provide information about stub blocks generated by the
+///        emitIndirectStubsBlock functions.
+template <unsigned StubSizeVal> class GenericIndirectStubsInfo {
+public:
+ const static unsigned StubSize = StubSizeVal;
+
+ GenericIndirectStubsInfo() = default;
+ GenericIndirectStubsInfo(unsigned NumStubs, sys::OwningMemoryBlock StubsMem)
+ : NumStubs(NumStubs), StubsMem(std::move(StubsMem)) {}
+ GenericIndirectStubsInfo(GenericIndirectStubsInfo &&Other)
+ : NumStubs(Other.NumStubs), StubsMem(std::move(Other.StubsMem)) {
+ Other.NumStubs = 0;
+ }
+
+ GenericIndirectStubsInfo &operator=(GenericIndirectStubsInfo &&Other) {
+ NumStubs = Other.NumStubs;
+ Other.NumStubs = 0;
+ StubsMem = std::move(Other.StubsMem);
+ return *this;
+ }
+
+ /// @brief Number of stubs in this block.
+ unsigned getNumStubs() const { return NumStubs; }
+
+ /// @brief Get a pointer to the stub at the given index, which must be in
+ /// the range 0 .. getNumStubs() - 1.
+ void *getStub(unsigned Idx) const {
+ return static_cast<char *>(StubsMem.base()) + Idx * StubSize;
+ }
+
+ /// @brief Get a pointer to the implementation-pointer at the given index,
+ /// which must be in the range 0 .. getNumStubs() - 1.
+ void **getPtr(unsigned Idx) const {
+ char *PtrsBase = static_cast<char *>(StubsMem.base()) + NumStubs * StubSize;
+ return reinterpret_cast<void **>(PtrsBase) + Idx;
+ }
+
+private:
+ unsigned NumStubs = 0;
+ sys::OwningMemoryBlock StubsMem;
+};
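+
+// Memory layout implied by getStub()/getPtr() above: StubsMem holds NumStubs
+// machine-code stubs back to back, immediately followed by one implementation
+// pointer per stub:
+//
+//   [ stub 0 | stub 1 | ... | stub N-1 | ptr 0 | ptr 1 | ... | ptr N-1 ]
+//
+// Each stub loads its pointer and jumps through it, so retargeting a stub only
+// requires rewriting its pointer, never its code.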
+
+class OrcAArch64 {
+public:
+ static const unsigned PointerSize = 8;
+ static const unsigned TrampolineSize = 12;
+ static const unsigned ResolverCodeSize = 0x120;
+
+ using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr);
+
+  /// @brief Write the requested number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines);
+
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+ /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
+ /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+ /// will return a block of 1024 (2-pages worth).
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal);
+};
+
+/// @brief X86_64 code that's common to all ABIs.
+///
+/// X86_64 supports lazy JITing.
+class OrcX86_64_Base {
+public:
+ static const unsigned PointerSize = 8;
+ static const unsigned TrampolineSize = 8;
+
+ using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+  /// @brief Write the requested number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines);
+
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+ /// E.g. Asking for 4 stubs on x86-64, where stubs are 8-bytes, with 4k
+ /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+ /// will return a block of 1024 (2-pages worth).
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal);
+};
+
+/// @brief X86_64 support for SysV ABI (Linux, MacOSX).
+///
+/// X86_64_SysV supports lazy JITing.
+class OrcX86_64_SysV : public OrcX86_64_Base {
+public:
+ static const unsigned ResolverCodeSize = 0x6C;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr);
+};
+
+/// @brief X86_64 support for Win32.
+///
+/// X86_64_Win32 supports lazy JITing.
+class OrcX86_64_Win32 : public OrcX86_64_Base {
+public:
+ static const unsigned ResolverCodeSize = 0x74;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr);
+};
+
+/// @brief I386 support.
+///
+/// I386 supports lazy JITing.
+class OrcI386 {
+public:
+ static const unsigned PointerSize = 4;
+ static const unsigned TrampolineSize = 8;
+ static const unsigned ResolverCodeSize = 0x4a;
+
+ using IndirectStubsInfo = GenericIndirectStubsInfo<8>;
+
+ using JITReentryFn = JITTargetAddress (*)(void *CallbackMgr,
+ void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+ /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr);
+
+  /// @brief Write the requested number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines);
+
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+ /// E.g. Asking for 4 stubs on i386, where stubs are 8-bytes, with 4k
+ /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+ /// will return a block of 1024 (2-pages worth).
+ static Error emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs, void *InitialPtrVal);
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h
new file mode 100644
index 0000000..c2ff41e
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcError.h
@@ -0,0 +1,70 @@
+//===------ OrcError.h - Orc error codes and helper utilities --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define an error category, error codes, and helper utilities for Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
+
+#include "llvm/Support/Error.h"
+#include <system_error>
+
+namespace llvm {
+namespace orc {
+
+enum class OrcErrorCode : int {
+ // RPC Errors
+ DuplicateDefinition = 1,
+ JITSymbolNotFound,
+ RemoteAllocatorDoesNotExist,
+ RemoteAllocatorIdAlreadyInUse,
+ RemoteMProtectAddrUnrecognized,
+ RemoteIndirectStubsOwnerDoesNotExist,
+ RemoteIndirectStubsOwnerIdAlreadyInUse,
+ RPCConnectionClosed,
+ RPCCouldNotNegotiateFunction,
+ RPCResponseAbandoned,
+ UnexpectedRPCCall,
+ UnexpectedRPCResponse,
+ UnknownErrorCodeFromRemote,
+ UnknownResourceHandle
+};
+
+std::error_code orcError(OrcErrorCode ErrCode);
+
+class DuplicateDefinition : public ErrorInfo<DuplicateDefinition> {
+public:
+ static char ID;
+
+ DuplicateDefinition(std::string SymbolName);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const std::string &getSymbolName() const;
+private:
+ std::string SymbolName;
+};
+
+class JITSymbolNotFound : public ErrorInfo<JITSymbolNotFound> {
+public:
+ static char ID;
+
+ JITSymbolNotFound(std::string SymbolName);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const std::string &getSymbolName() const;
+private:
+ std::string SymbolName;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCERROR_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
new file mode 100644
index 0000000..7179e5f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetClient.h
@@ -0,0 +1,672 @@
+//===- OrcRemoteTargetClient.h - Orc Remote-target Client -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrcRemoteTargetClient class and helpers. This class
+// can be used to communicate over an RawByteChannel with an
+// OrcRemoteTargetServer instance to support remote-JITing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#define DEBUG_TYPE "orc-remote"
+
+namespace llvm {
+namespace orc {
+namespace remote {
+
+/// This class provides utilities (including memory manager, indirect stubs
+/// manager, and compile callback manager types) that support remote JITing
+/// in ORC.
+///
+/// Each of the utility classes talks to a JIT server (an instance of the
+/// OrcRemoteTargetServer class) via an RPC system (see RPCUtils.h) to carry out
+/// its actions.
+class OrcRemoteTargetClient
+ : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+ /// Remote-mapped RuntimeDyld-compatible memory manager.
+ class RemoteRTDyldMemoryManager : public RuntimeDyld::MemoryManager {
+ friend class OrcRemoteTargetClient;
+
+ public:
+ ~RemoteRTDyldMemoryManager() {
+ Client.destroyRemoteAllocator(Id);
+ DEBUG(dbgs() << "Destroyed remote allocator " << Id << "\n");
+ }
+
+ RemoteRTDyldMemoryManager(const RemoteRTDyldMemoryManager &) = delete;
+ RemoteRTDyldMemoryManager &
+ operator=(const RemoteRTDyldMemoryManager &) = delete;
+ RemoteRTDyldMemoryManager(RemoteRTDyldMemoryManager &&) = default;
+ RemoteRTDyldMemoryManager &
+ operator=(RemoteRTDyldMemoryManager &&) = default;
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override {
+ Unmapped.back().CodeAllocs.emplace_back(Size, Alignment);
+ uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+ Unmapped.back().CodeAllocs.back().getLocalAddress());
+ DEBUG(dbgs() << "Allocator " << Id << " allocated code for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
+ return Alloc;
+ }
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool IsReadOnly) override {
+ if (IsReadOnly) {
+ Unmapped.back().RODataAllocs.emplace_back(Size, Alignment);
+ uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+ Unmapped.back().RODataAllocs.back().getLocalAddress());
+ DEBUG(dbgs() << "Allocator " << Id << " allocated ro-data for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
+ return Alloc;
+ } // else...
+
+ Unmapped.back().RWDataAllocs.emplace_back(Size, Alignment);
+ uint8_t *Alloc = reinterpret_cast<uint8_t *>(
+ Unmapped.back().RWDataAllocs.back().getLocalAddress());
+ DEBUG(dbgs() << "Allocator " << Id << " allocated rw-data for "
+ << SectionName << ": " << Alloc << " (" << Size
+ << " bytes, alignment " << Alignment << ")\n");
+ return Alloc;
+ }
+
+ void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+ uintptr_t RODataSize, uint32_t RODataAlign,
+ uintptr_t RWDataSize,
+ uint32_t RWDataAlign) override {
+ Unmapped.push_back(ObjectAllocs());
+
+ DEBUG(dbgs() << "Allocator " << Id << " reserved:\n");
+
+ if (CodeSize != 0) {
+ Unmapped.back().RemoteCodeAddr =
+ Client.reserveMem(Id, CodeSize, CodeAlign);
+
+ DEBUG(dbgs() << " code: "
+ << format("0x%016x", Unmapped.back().RemoteCodeAddr)
+ << " (" << CodeSize << " bytes, alignment " << CodeAlign
+ << ")\n");
+ }
+
+ if (RODataSize != 0) {
+ Unmapped.back().RemoteRODataAddr =
+ Client.reserveMem(Id, RODataSize, RODataAlign);
+
+ DEBUG(dbgs() << " ro-data: "
+ << format("0x%016x", Unmapped.back().RemoteRODataAddr)
+ << " (" << RODataSize << " bytes, alignment "
+ << RODataAlign << ")\n");
+ }
+
+ if (RWDataSize != 0) {
+ Unmapped.back().RemoteRWDataAddr =
+ Client.reserveMem(Id, RWDataSize, RWDataAlign);
+
+ DEBUG(dbgs() << " rw-data: "
+ << format("0x%016x", Unmapped.back().RemoteRWDataAddr)
+ << " (" << RWDataSize << " bytes, alignment "
+ << RWDataAlign << ")\n");
+ }
+ }
+
+ bool needsToReserveAllocationSpace() override { return true; }
+
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
+ UnfinalizedEHFrames.push_back({LoadAddr, Size});
+ }
+
+ void deregisterEHFrames() override {
+ for (auto &Frame : RegisteredEHFrames) {
+ // FIXME: Add error poll.
+ Client.deregisterEHFrames(Frame.Addr, Frame.Size);
+ }
+ }
+
+ void notifyObjectLoaded(RuntimeDyld &Dyld,
+ const object::ObjectFile &Obj) override {
+ DEBUG(dbgs() << "Allocator " << Id << " applied mappings:\n");
+ for (auto &ObjAllocs : Unmapped) {
+ mapAllocsToRemoteAddrs(Dyld, ObjAllocs.CodeAllocs,
+ ObjAllocs.RemoteCodeAddr);
+ mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RODataAllocs,
+ ObjAllocs.RemoteRODataAddr);
+ mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RWDataAllocs,
+ ObjAllocs.RemoteRWDataAddr);
+ Unfinalized.push_back(std::move(ObjAllocs));
+ }
+ Unmapped.clear();
+ }
+
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override {
+ DEBUG(dbgs() << "Allocator " << Id << " finalizing:\n");
+
+ for (auto &ObjAllocs : Unfinalized) {
+ if (copyAndProtect(ObjAllocs.CodeAllocs, ObjAllocs.RemoteCodeAddr,
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC))
+ return true;
+
+ if (copyAndProtect(ObjAllocs.RODataAllocs, ObjAllocs.RemoteRODataAddr,
+ sys::Memory::MF_READ))
+ return true;
+
+ if (copyAndProtect(ObjAllocs.RWDataAllocs, ObjAllocs.RemoteRWDataAddr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE))
+ return true;
+ }
+ Unfinalized.clear();
+
+ for (auto &EHFrame : UnfinalizedEHFrames) {
+ if (auto Err = Client.registerEHFrames(EHFrame.Addr, EHFrame.Size)) {
+ // FIXME: Replace this once finalizeMemory can return an Error.
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ if (ErrMsg) {
+ raw_string_ostream ErrOut(*ErrMsg);
+ EIB.log(ErrOut);
+ }
+ });
+          return true;
+ }
+ }
+ RegisteredEHFrames = std::move(UnfinalizedEHFrames);
+ UnfinalizedEHFrames = {};
+
+ return false;
+ }
+
+ private:
+ class Alloc {
+ public:
+ Alloc(uint64_t Size, unsigned Align)
+ : Size(Size), Align(Align), Contents(new char[Size + Align - 1]) {}
+
+ Alloc(const Alloc &) = delete;
+ Alloc &operator=(const Alloc &) = delete;
+ Alloc(Alloc &&) = default;
+ Alloc &operator=(Alloc &&) = default;
+
+ uint64_t getSize() const { return Size; }
+
+ unsigned getAlign() const { return Align; }
+
+ char *getLocalAddress() const {
+ uintptr_t LocalAddr = reinterpret_cast<uintptr_t>(Contents.get());
+ LocalAddr = alignTo(LocalAddr, Align);
+ return reinterpret_cast<char *>(LocalAddr);
+ }
+
+ void setRemoteAddress(JITTargetAddress RemoteAddr) {
+ this->RemoteAddr = RemoteAddr;
+ }
+
+ JITTargetAddress getRemoteAddress() const { return RemoteAddr; }
+
+ private:
+ uint64_t Size;
+ unsigned Align;
+ std::unique_ptr<char[]> Contents;
+ JITTargetAddress RemoteAddr = 0;
+ };
+
+ struct ObjectAllocs {
+ ObjectAllocs() = default;
+ ObjectAllocs(const ObjectAllocs &) = delete;
+ ObjectAllocs &operator=(const ObjectAllocs &) = delete;
+ ObjectAllocs(ObjectAllocs &&) = default;
+ ObjectAllocs &operator=(ObjectAllocs &&) = default;
+
+ JITTargetAddress RemoteCodeAddr = 0;
+ JITTargetAddress RemoteRODataAddr = 0;
+ JITTargetAddress RemoteRWDataAddr = 0;
+ std::vector<Alloc> CodeAllocs, RODataAllocs, RWDataAllocs;
+ };
+
+ RemoteRTDyldMemoryManager(OrcRemoteTargetClient &Client,
+ ResourceIdMgr::ResourceId Id)
+ : Client(Client), Id(Id) {
+ DEBUG(dbgs() << "Created remote allocator " << Id << "\n");
+ }
+
+    // Maps all allocations in Allocs to aligned remote addresses, starting
+    // at NextAddr.
+ void mapAllocsToRemoteAddrs(RuntimeDyld &Dyld, std::vector<Alloc> &Allocs,
+ JITTargetAddress NextAddr) {
+ for (auto &Alloc : Allocs) {
+ NextAddr = alignTo(NextAddr, Alloc.getAlign());
+ Dyld.mapSectionAddress(Alloc.getLocalAddress(), NextAddr);
+ DEBUG(dbgs() << " " << static_cast<void *>(Alloc.getLocalAddress())
+ << " -> " << format("0x%016x", NextAddr) << "\n");
+ Alloc.setRemoteAddress(NextAddr);
+
+ // Only advance NextAddr if it was non-null to begin with,
+ // otherwise leave it as null.
+ if (NextAddr)
+ NextAddr += Alloc.getSize();
+ }
+ }
+
+    // Copies data for each alloc in the list, then sets permissions on the
+    // segment.
+ bool copyAndProtect(const std::vector<Alloc> &Allocs,
+ JITTargetAddress RemoteSegmentAddr,
+ unsigned Permissions) {
+ if (RemoteSegmentAddr) {
+ assert(!Allocs.empty() && "No sections in allocated segment");
+
+ for (auto &Alloc : Allocs) {
+ DEBUG(dbgs() << " copying section: "
+ << static_cast<void *>(Alloc.getLocalAddress()) << " -> "
+ << format("0x%016x", Alloc.getRemoteAddress()) << " ("
+ << Alloc.getSize() << " bytes)\n";);
+
+ if (Client.writeMem(Alloc.getRemoteAddress(), Alloc.getLocalAddress(),
+ Alloc.getSize()))
+ return true;
+ }
+
+ DEBUG(dbgs() << " setting "
+ << (Permissions & sys::Memory::MF_READ ? 'R' : '-')
+ << (Permissions & sys::Memory::MF_WRITE ? 'W' : '-')
+ << (Permissions & sys::Memory::MF_EXEC ? 'X' : '-')
+ << " permissions on block: "
+ << format("0x%016x", RemoteSegmentAddr) << "\n");
+ if (Client.setProtections(Id, RemoteSegmentAddr, Permissions))
+ return true;
+ }
+ return false;
+ }
+
+ OrcRemoteTargetClient &Client;
+ ResourceIdMgr::ResourceId Id;
+ std::vector<ObjectAllocs> Unmapped;
+ std::vector<ObjectAllocs> Unfinalized;
+
+ struct EHFrame {
+ JITTargetAddress Addr;
+ uint64_t Size;
+ };
+ std::vector<EHFrame> UnfinalizedEHFrames;
+ std::vector<EHFrame> RegisteredEHFrames;
+ };
+
+ /// Remote indirect stubs manager.
+ class RemoteIndirectStubsManager : public IndirectStubsManager {
+ public:
+ RemoteIndirectStubsManager(OrcRemoteTargetClient &Client,
+ ResourceIdMgr::ResourceId Id)
+ : Client(Client), Id(Id) {}
+
+ ~RemoteIndirectStubsManager() override {
+ Client.destroyIndirectStubsManager(Id);
+ }
+
+ Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+ JITSymbolFlags StubFlags) override {
+ if (auto Err = reserveStubs(1))
+ return Err;
+
+ return createStubInternal(StubName, StubAddr, StubFlags);
+ }
+
+ Error createStubs(const StubInitsMap &StubInits) override {
+ if (auto Err = reserveStubs(StubInits.size()))
+ return Err;
+
+ for (auto &Entry : StubInits)
+ if (auto Err = createStubInternal(Entry.first(), Entry.second.first,
+ Entry.second.second))
+ return Err;
+
+ return Error::success();
+ }
+
+ JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ auto Flags = I->second.second;
+ auto StubSymbol = JITSymbol(getStubAddr(Key), Flags);
+ if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
+ return nullptr;
+ return StubSymbol;
+ }
+
+ JITSymbol findPointer(StringRef Name) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ auto Flags = I->second.second;
+ return JITSymbol(getPtrAddr(Key), Flags);
+ }
+
+ Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override {
+ auto I = StubIndexes.find(Name);
+ assert(I != StubIndexes.end() && "No stub pointer for symbol");
+ auto Key = I->second.first;
+ return Client.writePointer(getPtrAddr(Key), NewAddr);
+ }
+
+ private:
+ struct RemoteIndirectStubsInfo {
+ JITTargetAddress StubBase;
+ JITTargetAddress PtrBase;
+ unsigned NumStubs;
+ };
+
+ using StubKey = std::pair<uint16_t, uint16_t>;
+
+ Error reserveStubs(unsigned NumStubs) {
+ if (NumStubs <= FreeStubs.size())
+ return Error::success();
+
+ unsigned NewStubsRequired = NumStubs - FreeStubs.size();
+ JITTargetAddress StubBase;
+ JITTargetAddress PtrBase;
+ unsigned NumStubsEmitted;
+
+ if (auto StubInfoOrErr = Client.emitIndirectStubs(Id, NewStubsRequired))
+ std::tie(StubBase, PtrBase, NumStubsEmitted) = *StubInfoOrErr;
+ else
+ return StubInfoOrErr.takeError();
+
+ unsigned NewBlockId = RemoteIndirectStubsInfos.size();
+ RemoteIndirectStubsInfos.push_back({StubBase, PtrBase, NumStubsEmitted});
+
+ for (unsigned I = 0; I < NumStubsEmitted; ++I)
+ FreeStubs.push_back(std::make_pair(NewBlockId, I));
+
+ return Error::success();
+ }
+
+ Error createStubInternal(StringRef StubName, JITTargetAddress InitAddr,
+ JITSymbolFlags StubFlags) {
+ auto Key = FreeStubs.back();
+ FreeStubs.pop_back();
+ StubIndexes[StubName] = std::make_pair(Key, StubFlags);
+ return Client.writePointer(getPtrAddr(Key), InitAddr);
+ }
+
+ JITTargetAddress getStubAddr(StubKey K) {
+ assert(RemoteIndirectStubsInfos[K.first].StubBase != 0 &&
+ "Missing stub address");
+ return RemoteIndirectStubsInfos[K.first].StubBase +
+ K.second * Client.getIndirectStubSize();
+ }
+
+ JITTargetAddress getPtrAddr(StubKey K) {
+ assert(RemoteIndirectStubsInfos[K.first].PtrBase != 0 &&
+ "Missing pointer address");
+ return RemoteIndirectStubsInfos[K.first].PtrBase +
+ K.second * Client.getPointerSize();
+ }
+
+ OrcRemoteTargetClient &Client;
+ ResourceIdMgr::ResourceId Id;
+ std::vector<RemoteIndirectStubsInfo> RemoteIndirectStubsInfos;
+ std::vector<StubKey> FreeStubs;
+ StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
+ };
+
+ /// Remote compile callback manager.
+ class RemoteCompileCallbackManager : public JITCompileCallbackManager {
+ public:
+ RemoteCompileCallbackManager(OrcRemoteTargetClient &Client,
+ JITTargetAddress ErrorHandlerAddress)
+ : JITCompileCallbackManager(ErrorHandlerAddress), Client(Client) {}
+
+ private:
+ Error grow() override {
+ JITTargetAddress BlockAddr = 0;
+ uint32_t NumTrampolines = 0;
+ if (auto TrampolineInfoOrErr = Client.emitTrampolineBlock())
+ std::tie(BlockAddr, NumTrampolines) = *TrampolineInfoOrErr;
+ else
+ return TrampolineInfoOrErr.takeError();
+
+ uint32_t TrampolineSize = Client.getTrampolineSize();
+ for (unsigned I = 0; I < NumTrampolines; ++I)
+ this->AvailableTrampolines.push_back(BlockAddr + (I * TrampolineSize));
+
+ return Error::success();
+ }
+
+ OrcRemoteTargetClient &Client;
+ };
+
+ /// Create an OrcRemoteTargetClient.
+ /// Channel is the ChannelT instance to communicate on. It is assumed that
+ /// the channel is ready to be read from and written to.
+ static Expected<std::unique_ptr<OrcRemoteTargetClient>>
+ Create(rpc::RawByteChannel &Channel, std::function<void(Error)> ReportError) {
+ Error Err = Error::success();
+ auto Client = std::unique_ptr<OrcRemoteTargetClient>(
+ new OrcRemoteTargetClient(Channel, std::move(ReportError), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(Client);
+ }
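+
+  // A minimal usage sketch (illustrative only; the channel instance and error
+  // reporter below are assumptions for the example, not part of this header):
+  //
+  //   auto Client = OrcRemoteTargetClient::Create(
+  //       Channel,
+  //       [](Error Err) { logAllUnhandledErrors(std::move(Err), errs(), ""); });
+  //   if (!Client)
+  //     return Client.takeError();
+  //   auto MemMgrOrErr = (*Client)->createRemoteMemoryManager();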
+
+ /// Call the int(void) function at the given address in the target and return
+ /// its result.
+ Expected<int> callIntVoid(JITTargetAddress Addr) {
+ DEBUG(dbgs() << "Calling int(*)(void) " << format("0x%016x", Addr) << "\n");
+ return callB<exec::CallIntVoid>(Addr);
+ }
+
+ /// Call the int(int, char*[]) function at the given address in the target and
+ /// return its result.
+ Expected<int> callMain(JITTargetAddress Addr,
+ const std::vector<std::string> &Args) {
+ DEBUG(dbgs() << "Calling int(*)(int, char*[]) " << format("0x%016x", Addr)
+ << "\n");
+ return callB<exec::CallMain>(Addr, Args);
+ }
+
+ /// Call the void() function at the given address in the target and wait for
+ /// it to finish.
+ Error callVoidVoid(JITTargetAddress Addr) {
+ DEBUG(dbgs() << "Calling void(*)(void) " << format("0x%016x", Addr)
+ << "\n");
+ return callB<exec::CallVoidVoid>(Addr);
+ }
+
+  /// Create a RemoteRTDyldMemoryManager which will allocate its memory on the
+  /// remote target.
+ Expected<std::unique_ptr<RemoteRTDyldMemoryManager>>
+ createRemoteMemoryManager() {
+ auto Id = AllocatorIds.getNext();
+ if (auto Err = callB<mem::CreateRemoteAllocator>(Id))
+ return std::move(Err);
+ return std::unique_ptr<RemoteRTDyldMemoryManager>(
+ new RemoteRTDyldMemoryManager(*this, Id));
+ }
+
+  /// Create a RemoteIndirectStubsManager that will allocate stubs on the
+  /// remote target.
+ Expected<std::unique_ptr<RemoteIndirectStubsManager>>
+ createIndirectStubsManager() {
+ auto Id = IndirectStubOwnerIds.getNext();
+ if (auto Err = callB<stubs::CreateIndirectStubsOwner>(Id))
+ return std::move(Err);
+ return llvm::make_unique<RemoteIndirectStubsManager>(*this, Id);
+ }
+
+ Expected<RemoteCompileCallbackManager &>
+ enableCompileCallbacks(JITTargetAddress ErrorHandlerAddress) {
+ // Emit the resolver block on the JIT server.
+ if (auto Err = callB<stubs::EmitResolverBlock>())
+ return std::move(Err);
+
+ // Create the callback manager.
+ CallbackManager.emplace(*this, ErrorHandlerAddress);
+ RemoteCompileCallbackManager &Mgr = *CallbackManager;
+ return Mgr;
+ }
+
+ /// Search for symbols in the remote process. Note: This should be used by
+ /// symbol resolvers *after* they've searched the local symbol table in the
+ /// JIT stack.
+ Expected<JITTargetAddress> getSymbolAddress(StringRef Name) {
+ return callB<utils::GetSymbolAddress>(Name);
+ }
+
+ /// Get the triple for the remote target.
+ const std::string &getTargetTriple() const { return RemoteTargetTriple; }
+
+ Error terminateSession() { return callB<utils::TerminateSession>(); }
+
+private:
+ OrcRemoteTargetClient(rpc::RawByteChannel &Channel,
+ std::function<void(Error)> ReportError, Error &Err)
+ : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(Channel, true),
+ ReportError(std::move(ReportError)) {
+ ErrorAsOutParameter EAO(&Err);
+
+ addHandler<utils::RequestCompile>(
+ [this](JITTargetAddress Addr) -> JITTargetAddress {
+ if (CallbackManager)
+ return CallbackManager->executeCompileCallback(Addr);
+ return 0;
+ });
+
+ if (auto RIOrErr = callB<utils::GetRemoteInfo>()) {
+ std::tie(RemoteTargetTriple, RemotePointerSize, RemotePageSize,
+ RemoteTrampolineSize, RemoteIndirectStubSize) = *RIOrErr;
+ Err = Error::success();
+ } else
+ Err = RIOrErr.takeError();
+ }
+
+ void deregisterEHFrames(JITTargetAddress Addr, uint32_t Size) {
+    if (auto Err = callB<eh::DeregisterEHFrames>(Addr, Size))
+ ReportError(std::move(Err));
+ }
+
+ void destroyRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+ if (auto Err = callB<mem::DestroyRemoteAllocator>(Id)) {
+ // FIXME: This will be triggered by a removeModuleSet call: Propagate
+ // error return up through that.
+      llvm_unreachable("Failed to destroy remote allocator.");
+    }
+    AllocatorIds.release(Id);
+  }
+
+ void destroyIndirectStubsManager(ResourceIdMgr::ResourceId Id) {
+ IndirectStubOwnerIds.release(Id);
+ if (auto Err = callB<stubs::DestroyIndirectStubsOwner>(Id))
+ ReportError(std::move(Err));
+ }
+
+ Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
+ emitIndirectStubs(ResourceIdMgr::ResourceId Id, uint32_t NumStubsRequired) {
+ return callB<stubs::EmitIndirectStubs>(Id, NumStubsRequired);
+ }
+
+ Expected<std::tuple<JITTargetAddress, uint32_t>> emitTrampolineBlock() {
+ return callB<stubs::EmitTrampolineBlock>();
+ }
+
+ uint32_t getIndirectStubSize() const { return RemoteIndirectStubSize; }
+ uint32_t getPageSize() const { return RemotePageSize; }
+ uint32_t getPointerSize() const { return RemotePointerSize; }
+
+ uint32_t getTrampolineSize() const { return RemoteTrampolineSize; }
+
+ Expected<std::vector<uint8_t>> readMem(char *Dst, JITTargetAddress Src,
+ uint64_t Size) {
+ return callB<mem::ReadMem>(Src, Size);
+ }
+
+ Error registerEHFrames(JITTargetAddress &RAddr, uint32_t Size) {
+ // FIXME: Duplicate error and report it via ReportError too?
+ return callB<eh::RegisterEHFrames>(RAddr, Size);
+ }
+
+ JITTargetAddress reserveMem(ResourceIdMgr::ResourceId Id, uint64_t Size,
+ uint32_t Align) {
+ if (auto AddrOrErr = callB<mem::ReserveMem>(Id, Size, Align))
+ return *AddrOrErr;
+ else {
+ ReportError(AddrOrErr.takeError());
+ return 0;
+ }
+ }
+
+ bool setProtections(ResourceIdMgr::ResourceId Id,
+ JITTargetAddress RemoteSegAddr, unsigned ProtFlags) {
+ if (auto Err = callB<mem::SetProtections>(Id, RemoteSegAddr, ProtFlags)) {
+ ReportError(std::move(Err));
+ return true;
+ } else
+ return false;
+ }
+
+ bool writeMem(JITTargetAddress Addr, const char *Src, uint64_t Size) {
+ if (auto Err = callB<mem::WriteMem>(DirectBufferWriter(Src, Addr, Size))) {
+ ReportError(std::move(Err));
+ return true;
+ } else
+ return false;
+ }
+
+ Error writePointer(JITTargetAddress Addr, JITTargetAddress PtrVal) {
+ return callB<mem::WritePtr>(Addr, PtrVal);
+ }
+
+ static Error doNothing() { return Error::success(); }
+
+ std::function<void(Error)> ReportError;
+ std::string RemoteTargetTriple;
+ uint32_t RemotePointerSize = 0;
+ uint32_t RemotePageSize = 0;
+ uint32_t RemoteTrampolineSize = 0;
+ uint32_t RemoteIndirectStubSize = 0;
+ ResourceIdMgr AllocatorIds, IndirectStubOwnerIds;
+ Optional<RemoteCompileCallbackManager> CallbackManager;
+};
+
+} // end namespace remote
+} // end namespace orc
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETCLIENT_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
new file mode 100644
index 0000000..bc0da0f
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h
@@ -0,0 +1,377 @@
+//===- OrcRemoteTargetRPCAPI.h - Orc Remote-target RPC API ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Orc remote-target RPC API. It should not be used
+// directly, but is used by the RemoteTargetClient and RemoteTargetServer
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/RPCUtils.h"
+#include "llvm/ExecutionEngine/Orc/RawByteChannel.h"
+
+namespace llvm {
+namespace orc {
+
+namespace remote {
+
+/// Template error for missing resources.
+template <typename ResourceIdT>
+class ResourceNotFound
+ : public ErrorInfo<ResourceNotFound<ResourceIdT>> {
+public:
+ static char ID;
+
+ ResourceNotFound(ResourceIdT ResourceId,
+ std::string ResourceDescription = "")
+ : ResourceId(std::move(ResourceId)),
+ ResourceDescription(std::move(ResourceDescription)) {}
+
+ std::error_code convertToErrorCode() const override {
+ return orcError(OrcErrorCode::UnknownResourceHandle);
+ }
+
+ void log(raw_ostream &OS) const override {
+ OS << (ResourceDescription.empty()
+ ? "Remote resource with id "
+ : ResourceDescription)
+ << " " << ResourceId << " not found";
+ }
+
+private:
+ ResourceIdT ResourceId;
+ std::string ResourceDescription;
+};
+
+template <typename ResourceIdT>
+char ResourceNotFound<ResourceIdT>::ID = 0;
+
+class DirectBufferWriter {
+public:
+ DirectBufferWriter() = default;
+ DirectBufferWriter(const char *Src, JITTargetAddress Dst, uint64_t Size)
+ : Src(Src), Dst(Dst), Size(Size) {}
+
+ const char *getSrc() const { return Src; }
+ JITTargetAddress getDst() const { return Dst; }
+ uint64_t getSize() const { return Size; }
+
+private:
+ const char *Src;
+ JITTargetAddress Dst;
+ uint64_t Size;
+};
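+
+// The wire format for DirectBufferWriter is defined by its SerializationTraits
+// specialization below: the destination address and size are serialized first,
+// followed by the raw bytes, which the receiving end reads directly into the
+// destination address.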
+
+} // end namespace remote
+
+namespace rpc {
+
+template <>
+class RPCTypeName<JITSymbolFlags> {
+public:
+ static const char *getName() { return "JITSymbolFlags"; }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, JITSymbolFlags> {
+public:
+
+ static Error serialize(ChannelT &C, const JITSymbolFlags &Flags) {
+ return serializeSeq(C, static_cast<JITSymbolFlags::UnderlyingType>(Flags),
+ Flags.getTargetFlags());
+ }
+
+ static Error deserialize(ChannelT &C, JITSymbolFlags &Flags) {
+ JITSymbolFlags::UnderlyingType JITFlags;
+ JITSymbolFlags::TargetFlagsType TargetFlags;
+ if (auto Err = deserializeSeq(C, JITFlags, TargetFlags))
+ return Err;
+ Flags = JITSymbolFlags(static_cast<JITSymbolFlags::FlagNames>(JITFlags),
+ TargetFlags);
+ return Error::success();
+ }
+};
+
+template <> class RPCTypeName<remote::DirectBufferWriter> {
+public:
+ static const char *getName() { return "DirectBufferWriter"; }
+};
+
+template <typename ChannelT>
+class SerializationTraits<
+ ChannelT, remote::DirectBufferWriter, remote::DirectBufferWriter,
+ typename std::enable_if<
+ std::is_base_of<RawByteChannel, ChannelT>::value>::type> {
+public:
+ static Error serialize(ChannelT &C, const remote::DirectBufferWriter &DBW) {
+ if (auto EC = serializeSeq(C, DBW.getDst()))
+ return EC;
+ if (auto EC = serializeSeq(C, DBW.getSize()))
+ return EC;
+ return C.appendBytes(DBW.getSrc(), DBW.getSize());
+ }
+
+ static Error deserialize(ChannelT &C, remote::DirectBufferWriter &DBW) {
+ JITTargetAddress Dst;
+ if (auto EC = deserializeSeq(C, Dst))
+ return EC;
+ uint64_t Size;
+ if (auto EC = deserializeSeq(C, Size))
+ return EC;
+ char *Addr = reinterpret_cast<char *>(static_cast<uintptr_t>(Dst));
+
+ DBW = remote::DirectBufferWriter(nullptr, Dst, Size);
+
+ return C.readBytes(Addr, Size);
+ }
+};
+
+} // end namespace rpc
+
+namespace remote {
+
+class ResourceIdMgr {
+public:
+ using ResourceId = uint64_t;
+  static const ResourceId InvalidId = ~0ULL;
+
+ ResourceIdMgr() = default;
+ explicit ResourceIdMgr(ResourceId FirstValidId)
+ : NextId(std::move(FirstValidId)) {}
+
+ ResourceId getNext() {
+ if (!FreeIds.empty()) {
+ ResourceId I = FreeIds.back();
+ FreeIds.pop_back();
+ return I;
+ }
+ assert(NextId + 1 != ~0ULL && "All ids allocated");
+ return NextId++;
+ }
+
+ void release(ResourceId I) { FreeIds.push_back(I); }
+
+private:
+ ResourceId NextId = 1;
+ std::vector<ResourceId> FreeIds;
+};
+
+/// RPC functions for registering and deregistering EH frames on the remote.
+namespace eh {
+
+ /// Registers EH frames on the remote.
+ class RegisterEHFrames
+ : public rpc::Function<RegisterEHFrames,
+ void(JITTargetAddress Addr, uint32_t Size)> {
+ public:
+ static const char *getName() { return "RegisterEHFrames"; }
+ };
+
+ /// Deregisters EH frames on the remote.
+ class DeregisterEHFrames
+ : public rpc::Function<DeregisterEHFrames,
+ void(JITTargetAddress Addr, uint32_t Size)> {
+ public:
+ static const char *getName() { return "DeregisterEHFrames"; }
+ };
+
+} // end namespace eh
+
+/// RPC functions for executing remote code.
+namespace exec {
+
+  /// Call an 'int32_t()'-type function on the remote, returning the called
+  /// function's return value.
+ class CallIntVoid
+ : public rpc::Function<CallIntVoid, int32_t(JITTargetAddress Addr)> {
+ public:
+ static const char *getName() { return "CallIntVoid"; }
+ };
+
+  /// Call an 'int32_t(int32_t, char**)'-type function on the remote, returning
+  /// the called function's return value.
+ class CallMain
+ : public rpc::Function<CallMain, int32_t(JITTargetAddress Addr,
+ std::vector<std::string> Args)> {
+ public:
+ static const char *getName() { return "CallMain"; }
+ };
+
+  /// Call a 'void()'-type function on the remote, returning when the called
+  /// function completes.
+ class CallVoidVoid
+ : public rpc::Function<CallVoidVoid, void(JITTargetAddress FnAddr)> {
+ public:
+ static const char *getName() { return "CallVoidVoid"; }
+ };
+
+} // end namespace exec
+
+/// RPC functions for remote memory management / inspection / modification.
+namespace mem {
+
+ /// Creates a memory allocator on the remote.
+ class CreateRemoteAllocator
+ : public rpc::Function<CreateRemoteAllocator,
+ void(ResourceIdMgr::ResourceId AllocatorID)> {
+ public:
+ static const char *getName() { return "CreateRemoteAllocator"; }
+ };
+
+ /// Destroys a remote allocator, freeing any memory allocated by it.
+ class DestroyRemoteAllocator
+ : public rpc::Function<DestroyRemoteAllocator,
+ void(ResourceIdMgr::ResourceId AllocatorID)> {
+ public:
+ static const char *getName() { return "DestroyRemoteAllocator"; }
+ };
+
+ /// Read a remote memory block.
+ class ReadMem
+ : public rpc::Function<ReadMem, std::vector<uint8_t>(JITTargetAddress Src,
+ uint64_t Size)> {
+ public:
+ static const char *getName() { return "ReadMem"; }
+ };
+
+ /// Reserve a block of memory on the remote via the given allocator.
+ class ReserveMem
+ : public rpc::Function<ReserveMem,
+ JITTargetAddress(ResourceIdMgr::ResourceId AllocID,
+ uint64_t Size, uint32_t Align)> {
+ public:
+ static const char *getName() { return "ReserveMem"; }
+ };
+
+ /// Set the memory protection on a memory block.
+ class SetProtections
+ : public rpc::Function<SetProtections,
+ void(ResourceIdMgr::ResourceId AllocID,
+ JITTargetAddress Dst, uint32_t ProtFlags)> {
+ public:
+ static const char *getName() { return "SetProtections"; }
+ };
+
+ /// Write to a remote memory block.
+ class WriteMem
+ : public rpc::Function<WriteMem, void(remote::DirectBufferWriter DB)> {
+ public:
+ static const char *getName() { return "WriteMem"; }
+ };
+
+ /// Write to a remote pointer.
+ class WritePtr : public rpc::Function<WritePtr, void(JITTargetAddress Dst,
+ JITTargetAddress Val)> {
+ public:
+ static const char *getName() { return "WritePtr"; }
+ };
+
+} // end namespace mem
+
+/// RPC functions for remote stub and trampoline management.
+namespace stubs {
+
+ /// Creates an indirect stub owner on the remote.
+ class CreateIndirectStubsOwner
+ : public rpc::Function<CreateIndirectStubsOwner,
+ void(ResourceIdMgr::ResourceId StubOwnerID)> {
+ public:
+ static const char *getName() { return "CreateIndirectStubsOwner"; }
+ };
+
+ /// RPC function for destroying an indirect stubs owner.
+ class DestroyIndirectStubsOwner
+ : public rpc::Function<DestroyIndirectStubsOwner,
+ void(ResourceIdMgr::ResourceId StubsOwnerID)> {
+ public:
+ static const char *getName() { return "DestroyIndirectStubsOwner"; }
+ };
+
+ /// EmitIndirectStubs result is (StubsBase, PtrsBase, NumStubsEmitted).
+ class EmitIndirectStubs
+ : public rpc::Function<
+ EmitIndirectStubs,
+ std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>(
+ ResourceIdMgr::ResourceId StubsOwnerID,
+ uint32_t NumStubsRequired)> {
+ public:
+ static const char *getName() { return "EmitIndirectStubs"; }
+ };
+
+ /// RPC function to emit the resolver block and return its address.
+ class EmitResolverBlock : public rpc::Function<EmitResolverBlock, void()> {
+ public:
+ static const char *getName() { return "EmitResolverBlock"; }
+ };
+
+ /// EmitTrampolineBlock result is (BlockAddr, NumTrampolines).
+ class EmitTrampolineBlock
+ : public rpc::Function<EmitTrampolineBlock,
+ std::tuple<JITTargetAddress, uint32_t>()> {
+ public:
+ static const char *getName() { return "EmitTrampolineBlock"; }
+ };
+
+} // end namespace stubs
+
+/// Miscellaneous RPC functions for dealing with remotes.
+namespace utils {
+
+ /// GetRemoteInfo result is (Triple, PointerSize, PageSize, TrampolineSize,
+ /// IndirectStubsSize).
+ class GetRemoteInfo
+ : public rpc::Function<
+ GetRemoteInfo,
+ std::tuple<std::string, uint32_t, uint32_t, uint32_t, uint32_t>()> {
+ public:
+ static const char *getName() { return "GetRemoteInfo"; }
+ };
+
+ /// Get the address of a remote symbol.
+ class GetSymbolAddress
+ : public rpc::Function<GetSymbolAddress,
+ JITTargetAddress(std::string SymbolName)> {
+ public:
+ static const char *getName() { return "GetSymbolAddress"; }
+ };
+
+ /// Request that the host execute a compile callback.
+ class RequestCompile
+ : public rpc::Function<
+ RequestCompile, JITTargetAddress(JITTargetAddress TrampolineAddr)> {
+ public:
+ static const char *getName() { return "RequestCompile"; }
+ };
+
+ /// Notify the remote and terminate the session.
+ class TerminateSession : public rpc::Function<TerminateSession, void()> {
+ public:
+ static const char *getName() { return "TerminateSession"; }
+ };
+
+} // end namespace utils
+
+class OrcRemoteTargetRPCAPI
+ : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+ // FIXME: Remove constructors once MSVC supports synthesizing move-ops.
+ OrcRemoteTargetRPCAPI(rpc::RawByteChannel &C)
+ : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(C, true) {}
+};
+
+} // end namespace remote
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETRPCAPI_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
new file mode 100644
index 0000000..cf419d3
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h
@@ -0,0 +1,447 @@
+//===- OrcRemoteTargetServer.h - Orc Remote-target Server -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrcRemoteTargetServer class. It can be used to build a
+// JIT server that can execute code sent from an OrcRemoteTargetClient.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <type_traits>
+#include <vector>
+
+#define DEBUG_TYPE "orc-remote"
+
+namespace llvm {
+namespace orc {
+namespace remote {
+
+template <typename ChannelT, typename TargetT>
+class OrcRemoteTargetServer
+ : public rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel> {
+public:
+ using SymbolLookupFtor =
+ std::function<JITTargetAddress(const std::string &Name)>;
+
+ using EHFrameRegistrationFtor =
+ std::function<void(uint8_t *Addr, uint32_t Size)>;
+
+ OrcRemoteTargetServer(ChannelT &Channel, SymbolLookupFtor SymbolLookup,
+ EHFrameRegistrationFtor EHFramesRegister,
+ EHFrameRegistrationFtor EHFramesDeregister)
+ : rpc::SingleThreadedRPCEndpoint<rpc::RawByteChannel>(Channel, true),
+ SymbolLookup(std::move(SymbolLookup)),
+ EHFramesRegister(std::move(EHFramesRegister)),
+ EHFramesDeregister(std::move(EHFramesDeregister)) {
+ using ThisT = typename std::remove_reference<decltype(*this)>::type;
+ addHandler<exec::CallIntVoid>(*this, &ThisT::handleCallIntVoid);
+ addHandler<exec::CallMain>(*this, &ThisT::handleCallMain);
+ addHandler<exec::CallVoidVoid>(*this, &ThisT::handleCallVoidVoid);
+ addHandler<mem::CreateRemoteAllocator>(*this,
+ &ThisT::handleCreateRemoteAllocator);
+ addHandler<mem::DestroyRemoteAllocator>(
+ *this, &ThisT::handleDestroyRemoteAllocator);
+ addHandler<mem::ReadMem>(*this, &ThisT::handleReadMem);
+ addHandler<mem::ReserveMem>(*this, &ThisT::handleReserveMem);
+ addHandler<mem::SetProtections>(*this, &ThisT::handleSetProtections);
+ addHandler<mem::WriteMem>(*this, &ThisT::handleWriteMem);
+ addHandler<mem::WritePtr>(*this, &ThisT::handleWritePtr);
+ addHandler<eh::RegisterEHFrames>(*this, &ThisT::handleRegisterEHFrames);
+ addHandler<eh::DeregisterEHFrames>(*this, &ThisT::handleDeregisterEHFrames);
+ addHandler<stubs::CreateIndirectStubsOwner>(
+ *this, &ThisT::handleCreateIndirectStubsOwner);
+ addHandler<stubs::DestroyIndirectStubsOwner>(
+ *this, &ThisT::handleDestroyIndirectStubsOwner);
+ addHandler<stubs::EmitIndirectStubs>(*this,
+ &ThisT::handleEmitIndirectStubs);
+ addHandler<stubs::EmitResolverBlock>(*this,
+ &ThisT::handleEmitResolverBlock);
+ addHandler<stubs::EmitTrampolineBlock>(*this,
+ &ThisT::handleEmitTrampolineBlock);
+ addHandler<utils::GetSymbolAddress>(*this, &ThisT::handleGetSymbolAddress);
+ addHandler<utils::GetRemoteInfo>(*this, &ThisT::handleGetRemoteInfo);
+ addHandler<utils::TerminateSession>(*this, &ThisT::handleTerminateSession);
+ }
+
+ // FIXME: Remove move/copy ops once MSVC supports synthesizing move ops.
+ OrcRemoteTargetServer(const OrcRemoteTargetServer &) = delete;
+ OrcRemoteTargetServer &operator=(const OrcRemoteTargetServer &) = delete;
+
+ OrcRemoteTargetServer(OrcRemoteTargetServer &&Other) = default;
+ OrcRemoteTargetServer &operator=(OrcRemoteTargetServer &&) = delete;
+
+ Expected<JITTargetAddress> requestCompile(JITTargetAddress TrampolineAddr) {
+ return callB<utils::RequestCompile>(TrampolineAddr);
+ }
+
+ bool receivedTerminate() const { return TerminateFlag; }
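+
+  // A minimal driving loop for the server (sketch only; handleOne() is
+  // provided by the underlying RPC endpoint, and this pattern mirrors the
+  // in-tree remote-JIT demos rather than anything defined in this header):
+  //
+  //   ExitOnError ExitOnErr;
+  //   while (!Server.receivedTerminate())
+  //     ExitOnErr(Server.handleOne());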
+
+private:
+ struct Allocator {
+ Allocator() = default;
+ Allocator(Allocator &&Other) : Allocs(std::move(Other.Allocs)) {}
+
+ Allocator &operator=(Allocator &&Other) {
+ Allocs = std::move(Other.Allocs);
+ return *this;
+ }
+
+ ~Allocator() {
+ for (auto &Alloc : Allocs)
+ sys::Memory::releaseMappedMemory(Alloc.second);
+ }
+
+ Error allocate(void *&Addr, size_t Size, uint32_t Align) {
+ std::error_code EC;
+ sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
+ Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
+ if (EC)
+ return errorCodeToError(EC);
+
+ Addr = MB.base();
+ assert(Allocs.find(MB.base()) == Allocs.end() && "Duplicate alloc");
+ Allocs[MB.base()] = std::move(MB);
+ return Error::success();
+ }
+
+ Error setProtections(void *block, unsigned Flags) {
+ auto I = Allocs.find(block);
+ if (I == Allocs.end())
+ return errorCodeToError(orcError(OrcErrorCode::RemoteMProtectAddrUnrecognized));
+ return errorCodeToError(
+ sys::Memory::protectMappedMemory(I->second, Flags));
+ }
+
+ private:
+ std::map<void *, sys::MemoryBlock> Allocs;
+ };
+
+ static Error doNothing() { return Error::success(); }
+
+ static JITTargetAddress reenter(void *JITTargetAddr, void *TrampolineAddr) {
+ auto T = static_cast<OrcRemoteTargetServer *>(JITTargetAddr);
+ auto AddrOrErr = T->requestCompile(static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineAddr)));
+ // FIXME: Allow customizable failure substitution functions.
+ assert(AddrOrErr && "Compile request failed");
+ return *AddrOrErr;
+ }
+
+ Expected<int32_t> handleCallIntVoid(JITTargetAddress Addr) {
+ using IntVoidFnTy = int (*)();
+
+ IntVoidFnTy Fn =
+ reinterpret_cast<IntVoidFnTy>(static_cast<uintptr_t>(Addr));
+
+ DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ int Result = Fn();
+ DEBUG(dbgs() << " Result = " << Result << "\n");
+
+ return Result;
+ }
+
+ Expected<int32_t> handleCallMain(JITTargetAddress Addr,
+ std::vector<std::string> Args) {
+ using MainFnTy = int (*)(int, const char *[]);
+
+ MainFnTy Fn = reinterpret_cast<MainFnTy>(static_cast<uintptr_t>(Addr));
+ int ArgC = Args.size() + 1;
+ int Idx = 1;
+ std::unique_ptr<const char *[]> ArgV(new const char *[ArgC + 1]);
+ ArgV[0] = "<jit process>";
+ for (auto &Arg : Args)
+ ArgV[Idx++] = Arg.c_str();
+ ArgV[ArgC] = 0;
+ DEBUG(
+ for (int Idx = 0; Idx < ArgC; ++Idx) {
+ llvm::dbgs() << "Arg " << Idx << ": " << ArgV[Idx] << "\n";
+ }
+ );
+
+ DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ int Result = Fn(ArgC, ArgV.get());
+ DEBUG(dbgs() << " Result = " << Result << "\n");
+
+ return Result;
+ }
+
+ Error handleCallVoidVoid(JITTargetAddress Addr) {
+ using VoidVoidFnTy = void (*)();
+
+ VoidVoidFnTy Fn =
+ reinterpret_cast<VoidVoidFnTy>(static_cast<uintptr_t>(Addr));
+
+ DEBUG(dbgs() << " Calling " << format("0x%016x", Addr) << "\n");
+ Fn();
+ DEBUG(dbgs() << " Complete.\n");
+
+ return Error::success();
+ }
+
+ Error handleCreateRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+ auto I = Allocators.find(Id);
+ if (I != Allocators.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteAllocatorIdAlreadyInUse));
+ DEBUG(dbgs() << " Created allocator " << Id << "\n");
+ Allocators[Id] = Allocator();
+ return Error::success();
+ }
+
+ Error handleCreateIndirectStubsOwner(ResourceIdMgr::ResourceId Id) {
+ auto I = IndirectStubsOwners.find(Id);
+ if (I != IndirectStubsOwners.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse));
+ DEBUG(dbgs() << " Create indirect stubs owner " << Id << "\n");
+ IndirectStubsOwners[Id] = ISBlockOwnerList();
+ return Error::success();
+ }
+
+ Error handleDeregisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
+ uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
+    DEBUG(dbgs() << "  Deregistering EH frames at " << format("0x%016x", TAddr)
+ << ", Size = " << Size << " bytes\n");
+ EHFramesDeregister(Addr, Size);
+ return Error::success();
+ }
+
+ Error handleDestroyRemoteAllocator(ResourceIdMgr::ResourceId Id) {
+ auto I = Allocators.find(Id);
+ if (I == Allocators.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+ Allocators.erase(I);
+ DEBUG(dbgs() << " Destroyed allocator " << Id << "\n");
+ return Error::success();
+ }
+
+ Error handleDestroyIndirectStubsOwner(ResourceIdMgr::ResourceId Id) {
+ auto I = IndirectStubsOwners.find(Id);
+ if (I == IndirectStubsOwners.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist));
+ IndirectStubsOwners.erase(I);
+ return Error::success();
+ }
+
+ Expected<std::tuple<JITTargetAddress, JITTargetAddress, uint32_t>>
+ handleEmitIndirectStubs(ResourceIdMgr::ResourceId Id,
+ uint32_t NumStubsRequired) {
+ DEBUG(dbgs() << " ISMgr " << Id << " request " << NumStubsRequired
+ << " stubs.\n");
+
+ auto StubOwnerItr = IndirectStubsOwners.find(Id);
+ if (StubOwnerItr == IndirectStubsOwners.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist));
+
+ typename TargetT::IndirectStubsInfo IS;
+ if (auto Err =
+ TargetT::emitIndirectStubsBlock(IS, NumStubsRequired, nullptr))
+ return std::move(Err);
+
+ JITTargetAddress StubsBase = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(IS.getStub(0)));
+ JITTargetAddress PtrsBase = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(IS.getPtr(0)));
+ uint32_t NumStubsEmitted = IS.getNumStubs();
+
+ auto &BlockList = StubOwnerItr->second;
+ BlockList.push_back(std::move(IS));
+
+ return std::make_tuple(StubsBase, PtrsBase, NumStubsEmitted);
+ }
+
+ Error handleEmitResolverBlock() {
+ std::error_code EC;
+ ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ TargetT::ResolverCodeSize, nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+ if (EC)
+ return errorCodeToError(EC);
+
+ TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+ &reenter, this);
+
+ return errorCodeToError(sys::Memory::protectMappedMemory(
+ ResolverBlock.getMemoryBlock(),
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC));
+ }
+
+ Expected<std::tuple<JITTargetAddress, uint32_t>> handleEmitTrampolineBlock() {
+ std::error_code EC;
+ auto TrampolineBlock =
+ sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
+ sys::Process::getPageSize(), nullptr,
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
+ if (EC)
+ return errorCodeToError(EC);
+
+ uint32_t NumTrampolines =
+ (sys::Process::getPageSize() - TargetT::PointerSize) /
+ TargetT::TrampolineSize;
+
+ uint8_t *TrampolineMem = static_cast<uint8_t *>(TrampolineBlock.base());
+ TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+ NumTrampolines);
+
+    EC = sys::Memory::protectMappedMemory(TrampolineBlock.getMemoryBlock(),
+                                          sys::Memory::MF_READ |
+                                              sys::Memory::MF_EXEC);
+    if (EC)
+      return errorCodeToError(EC);
+
+ TrampolineBlocks.push_back(std::move(TrampolineBlock));
+
+ auto TrampolineBaseAddr = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineMem));
+
+ return std::make_tuple(TrampolineBaseAddr, NumTrampolines);
+ }
+
+ Expected<JITTargetAddress> handleGetSymbolAddress(const std::string &Name) {
+ JITTargetAddress Addr = SymbolLookup(Name);
+ DEBUG(dbgs() << " Symbol '" << Name << "' = " << format("0x%016x", Addr)
+ << "\n");
+ return Addr;
+ }
+
+ Expected<std::tuple<std::string, uint32_t, uint32_t, uint32_t, uint32_t>>
+ handleGetRemoteInfo() {
+ std::string ProcessTriple = sys::getProcessTriple();
+ uint32_t PointerSize = TargetT::PointerSize;
+ uint32_t PageSize = sys::Process::getPageSize();
+ uint32_t TrampolineSize = TargetT::TrampolineSize;
+ uint32_t IndirectStubSize = TargetT::IndirectStubsInfo::StubSize;
+ DEBUG(dbgs() << " Remote info:\n"
+ << " triple = '" << ProcessTriple << "'\n"
+ << " pointer size = " << PointerSize << "\n"
+ << " page size = " << PageSize << "\n"
+ << " trampoline size = " << TrampolineSize << "\n"
+ << " indirect stub size = " << IndirectStubSize << "\n");
+ return std::make_tuple(ProcessTriple, PointerSize, PageSize, TrampolineSize,
+ IndirectStubSize);
+ }
+
+ Expected<std::vector<uint8_t>> handleReadMem(JITTargetAddress RSrc,
+ uint64_t Size) {
+ uint8_t *Src = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(RSrc));
+
+ DEBUG(dbgs() << " Reading " << Size << " bytes from "
+ << format("0x%016x", RSrc) << "\n");
+
+ std::vector<uint8_t> Buffer;
+    Buffer.reserve(Size);
+ for (uint8_t *P = Src; Size != 0; --Size)
+ Buffer.push_back(*P++);
+
+ return Buffer;
+ }
+
+ Error handleRegisterEHFrames(JITTargetAddress TAddr, uint32_t Size) {
+ uint8_t *Addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(TAddr));
+ DEBUG(dbgs() << " Registering EH frames at " << format("0x%016x", TAddr)
+ << ", Size = " << Size << " bytes\n");
+ EHFramesRegister(Addr, Size);
+ return Error::success();
+ }
+
+ Expected<JITTargetAddress> handleReserveMem(ResourceIdMgr::ResourceId Id,
+ uint64_t Size, uint32_t Align) {
+ auto I = Allocators.find(Id);
+ if (I == Allocators.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+ auto &Allocator = I->second;
+ void *LocalAllocAddr = nullptr;
+ if (auto Err = Allocator.allocate(LocalAllocAddr, Size, Align))
+ return std::move(Err);
+
+ DEBUG(dbgs() << " Allocator " << Id << " reserved " << LocalAllocAddr
+ << " (" << Size << " bytes, alignment " << Align << ")\n");
+
+ JITTargetAddress AllocAddr = static_cast<JITTargetAddress>(
+ reinterpret_cast<uintptr_t>(LocalAllocAddr));
+
+ return AllocAddr;
+ }
+
+ Error handleSetProtections(ResourceIdMgr::ResourceId Id,
+ JITTargetAddress Addr, uint32_t Flags) {
+ auto I = Allocators.find(Id);
+ if (I == Allocators.end())
+ return errorCodeToError(
+ orcError(OrcErrorCode::RemoteAllocatorDoesNotExist));
+ auto &Allocator = I->second;
+ void *LocalAddr = reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
+ DEBUG(dbgs() << " Allocator " << Id << " set permissions on " << LocalAddr
+ << " to " << (Flags & sys::Memory::MF_READ ? 'R' : '-')
+ << (Flags & sys::Memory::MF_WRITE ? 'W' : '-')
+ << (Flags & sys::Memory::MF_EXEC ? 'X' : '-') << "\n");
+ return Allocator.setProtections(LocalAddr, Flags);
+ }
+
+ Error handleTerminateSession() {
+ TerminateFlag = true;
+ return Error::success();
+ }
+
+ Error handleWriteMem(DirectBufferWriter DBW) {
+ DEBUG(dbgs() << " Writing " << DBW.getSize() << " bytes to "
+ << format("0x%016x", DBW.getDst()) << "\n");
+ return Error::success();
+ }
+
+ Error handleWritePtr(JITTargetAddress Addr, JITTargetAddress PtrVal) {
+ DEBUG(dbgs() << " Writing pointer *" << format("0x%016x", Addr) << " = "
+ << format("0x%016x", PtrVal) << "\n");
+ uintptr_t *Ptr =
+ reinterpret_cast<uintptr_t *>(static_cast<uintptr_t>(Addr));
+ *Ptr = static_cast<uintptr_t>(PtrVal);
+ return Error::success();
+ }
+
+ SymbolLookupFtor SymbolLookup;
+ EHFrameRegistrationFtor EHFramesRegister, EHFramesDeregister;
+ std::map<ResourceIdMgr::ResourceId, Allocator> Allocators;
+ using ISBlockOwnerList = std::vector<typename TargetT::IndirectStubsInfo>;
+ std::map<ResourceIdMgr::ResourceId, ISBlockOwnerList> IndirectStubsOwners;
+ sys::OwningMemoryBlock ResolverBlock;
+ std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+ bool TerminateFlag = false;
+};
+
+} // end namespace remote
+} // end namespace orc
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCREMOTETARGETSERVER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
new file mode 100644
index 0000000..569c506
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
@@ -0,0 +1,609 @@
+//===- llvm/ExecutionEngine/Orc/RPCSerialization.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+#define LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
+
+#include "OrcError.h"
+#include "llvm/Support/thread.h"
+#include <map>
+#include <mutex>
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+template <typename T>
+class RPCTypeName;
+
+/// TypeNameSequence is a utility for rendering sequences of types to a string
+/// by rendering each type, separated by ", ".
+template <typename... ArgTs> class RPCTypeNameSequence {};
+
+/// Render an empty TypeNameSequence to an ostream.
+template <typename OStream>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<> &V) {
+ return OS;
+}
+
+/// Render a TypeNameSequence of a single type to an ostream.
+template <typename OStream, typename ArgT>
+OStream &operator<<(OStream &OS, const RPCTypeNameSequence<ArgT> &V) {
+ OS << RPCTypeName<ArgT>::getName();
+ return OS;
+}
+
+/// Render a TypeNameSequence of more than one type to an ostream.
+template <typename OStream, typename ArgT1, typename ArgT2, typename... ArgTs>
+OStream&
+operator<<(OStream &OS, const RPCTypeNameSequence<ArgT1, ArgT2, ArgTs...> &V) {
+ OS << RPCTypeName<ArgT1>::getName() << ", "
+ << RPCTypeNameSequence<ArgT2, ArgTs...>();
+ return OS;
+}
+
+template <>
+class RPCTypeName<void> {
+public:
+ static const char* getName() { return "void"; }
+};
+
+template <>
+class RPCTypeName<int8_t> {
+public:
+ static const char* getName() { return "int8_t"; }
+};
+
+template <>
+class RPCTypeName<uint8_t> {
+public:
+ static const char* getName() { return "uint8_t"; }
+};
+
+template <>
+class RPCTypeName<int16_t> {
+public:
+ static const char* getName() { return "int16_t"; }
+};
+
+template <>
+class RPCTypeName<uint16_t> {
+public:
+ static const char* getName() { return "uint16_t"; }
+};
+
+template <>
+class RPCTypeName<int32_t> {
+public:
+ static const char* getName() { return "int32_t"; }
+};
+
+template <>
+class RPCTypeName<uint32_t> {
+public:
+ static const char* getName() { return "uint32_t"; }
+};
+
+template <>
+class RPCTypeName<int64_t> {
+public:
+ static const char* getName() { return "int64_t"; }
+};
+
+template <>
+class RPCTypeName<uint64_t> {
+public:
+ static const char* getName() { return "uint64_t"; }
+};
+
+template <>
+class RPCTypeName<bool> {
+public:
+ static const char* getName() { return "bool"; }
+};
+
+template <>
+class RPCTypeName<std::string> {
+public:
+ static const char* getName() { return "std::string"; }
+};
+
+template <>
+class RPCTypeName<Error> {
+public:
+ static const char* getName() { return "Error"; }
+};
+
+template <typename T>
+class RPCTypeName<Expected<T>> {
+public:
+ static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "Expected<"
+ << RPCTypeNameSequence<T>()
+ << ">";
+ return Name.data();
+ }
+
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename T>
+std::mutex RPCTypeName<Expected<T>>::NameMutex;
+
+template <typename T>
+std::string RPCTypeName<Expected<T>>::Name;
+
+template <typename T1, typename T2>
+class RPCTypeName<std::pair<T1, T2>> {
+public:
+ static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::pair<" << RPCTypeNameSequence<T1, T2>()
+ << ">";
+ return Name.data();
+ }
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename T1, typename T2>
+std::mutex RPCTypeName<std::pair<T1, T2>>::NameMutex;
+template <typename T1, typename T2>
+std::string RPCTypeName<std::pair<T1, T2>>::Name;
+
+template <typename... ArgTs>
+class RPCTypeName<std::tuple<ArgTs...>> {
+public:
+ static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::tuple<"
+ << RPCTypeNameSequence<ArgTs...>() << ">";
+ return Name.data();
+ }
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename... ArgTs>
+std::mutex RPCTypeName<std::tuple<ArgTs...>>::NameMutex;
+template <typename... ArgTs>
+std::string RPCTypeName<std::tuple<ArgTs...>>::Name;
+
+template <typename T>
+class RPCTypeName<std::vector<T>> {
+public:
+  static const char* getName() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name) << "std::vector<" << RPCTypeName<T>::getName()
+ << ">";
+ return Name.data();
+ }
+
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename T>
+std::mutex RPCTypeName<std::vector<T>>::NameMutex;
+template <typename T>
+std::string RPCTypeName<std::vector<T>>::Name;
+
+
+/// The SerializationTraits<ChannelT, T> class describes how to serialize and
+/// deserialize an instance of type T to/from an abstract channel of type
+/// ChannelT. It also provides a representation of the type's name via the
+/// getName method.
+///
+/// Specializations of this class should provide the following functions:
+///
+/// @code{.cpp}
+///
+/// static const char* getName();
+/// static Error serialize(ChannelT&, const T&);
+/// static Error deserialize(ChannelT&, T&);
+///
+/// @endcode
+///
+/// The final template argument of SerializationTraits is intended to support
+/// SFINAE. E.g.:
+///
+/// @code{.cpp}
+///
+/// class MyVirtualChannel { ... };
+///
+///   template <typename DerivedChannelT>
+///   class SerializationTraits<DerivedChannelT, bool, bool,
+///         typename std::enable_if<
+///           std::is_base_of<MyVirtualChannel, DerivedChannelT>::value
+///         >::type> {
+///   public:
+///     static const char* getName() { ... };
+///   };
+///
+/// @endcode
+template <typename ChannelT, typename WireType,
+ typename ConcreteType = WireType, typename = void>
+class SerializationTraits;
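+
+// A sketch of a complete specialization for a hypothetical POD type 'Point'
+// over a hypothetical channel type 'MyChannel' (illustrative only; both names
+// are assumptions for the example):
+//
+//   template <>
+//   class SerializationTraits<MyChannel, Point> {
+//   public:
+//     static const char* getName() { return "Point"; }
+//     static Error serialize(MyChannel &C, const Point &P) {
+//       return serializeSeq(C, P.X, P.Y);
+//     }
+//     static Error deserialize(MyChannel &C, Point &P) {
+//       return deserializeSeq(C, P.X, P.Y);
+//     }
+//   };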
+
+template <typename ChannelT>
+class SequenceTraits {
+public:
+ static Error emitSeparator(ChannelT &C) { return Error::success(); }
+ static Error consumeSeparator(ChannelT &C) { return Error::success(); }
+};
+
+/// Utility class for serializing sequences of values of varying types.
+/// Specializations of this class contain 'serialize' and 'deserialize' methods
+/// for the given channel. The ArgTs... list will determine the "over-the-wire"
+/// types to be serialized. The serialize and deserialize methods take a list
+/// CArgTs... ("caller arg types") which must be the same length as ArgTs...,
+/// but may be different types from ArgTs, provided that for each CArgT there
+/// is a SerializationTraits specialization
+/// SerializationTraits<ChannelT, ArgT, CArgT> with methods that can serialize
+/// the caller argument to the over-the-wire value.
+template <typename ChannelT, typename... ArgTs>
+class SequenceSerialization;
+
+template <typename ChannelT>
+class SequenceSerialization<ChannelT> {
+public:
+ static Error serialize(ChannelT &C) { return Error::success(); }
+ static Error deserialize(ChannelT &C) { return Error::success(); }
+};
+
+template <typename ChannelT, typename ArgT>
+class SequenceSerialization<ChannelT, ArgT> {
+public:
+
+ template <typename CArgT>
+ static Error serialize(ChannelT &C, CArgT &&CArg) {
+ return SerializationTraits<ChannelT, ArgT,
+ typename std::decay<CArgT>::type>::
+ serialize(C, std::forward<CArgT>(CArg));
+ }
+
+ template <typename CArgT>
+ static Error deserialize(ChannelT &C, CArgT &CArg) {
+ return SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg);
+ }
+};
+
+template <typename ChannelT, typename ArgT, typename... ArgTs>
+class SequenceSerialization<ChannelT, ArgT, ArgTs...> {
+public:
+
+ template <typename CArgT, typename... CArgTs>
+ static Error serialize(ChannelT &C, CArgT &&CArg,
+ CArgTs &&... CArgs) {
+ if (auto Err =
+ SerializationTraits<ChannelT, ArgT, typename std::decay<CArgT>::type>::
+ serialize(C, std::forward<CArgT>(CArg)))
+ return Err;
+ if (auto Err = SequenceTraits<ChannelT>::emitSeparator(C))
+ return Err;
+ return SequenceSerialization<ChannelT, ArgTs...>::
+ serialize(C, std::forward<CArgTs>(CArgs)...);
+ }
+
+ template <typename CArgT, typename... CArgTs>
+ static Error deserialize(ChannelT &C, CArgT &CArg,
+ CArgTs &... CArgs) {
+ if (auto Err =
+ SerializationTraits<ChannelT, ArgT, CArgT>::deserialize(C, CArg))
+ return Err;
+ if (auto Err = SequenceTraits<ChannelT>::consumeSeparator(C))
+ return Err;
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, CArgs...);
+ }
+};
+
+template <typename ChannelT, typename... ArgTs>
+Error serializeSeq(ChannelT &C, ArgTs &&... Args) {
+ return SequenceSerialization<ChannelT, typename std::decay<ArgTs>::type...>::
+ serialize(C, std::forward<ArgTs>(Args)...);
+}
+
+template <typename ChannelT, typename... ArgTs>
+Error deserializeSeq(ChannelT &C, ArgTs &... Args) {
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(C, Args...);
+}
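+
+// Example use of serializeSeq / deserializeSeq (assuming 'C' is a channel with
+// SerializationTraits defined for uint32_t and std::string):
+//
+//   if (auto Err = serializeSeq(C, uint32_t(42), std::string("hello")))
+//     return Err;
+//   uint32_t N; std::string S;
+//   if (auto Err = deserializeSeq(C, N, S))
+//     return Err;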
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, Error> {
+public:
+
+ using WrappedErrorSerializer =
+ std::function<Error(ChannelT &C, const ErrorInfoBase&)>;
+
+ using WrappedErrorDeserializer =
+ std::function<Error(ChannelT &C, Error &Err)>;
+
+ template <typename ErrorInfoT, typename SerializeFtor,
+ typename DeserializeFtor>
+ static void registerErrorType(std::string Name, SerializeFtor Serialize,
+ DeserializeFtor Deserialize) {
+ assert(!Name.empty() &&
+ "The empty string is reserved for the Success value");
+
+ const std::string *KeyName = nullptr;
+ {
+ // We're abusing the stability of std::map here: We take a reference to the
+ // key of the deserializers map to save us from duplicating the string in
+ // the serializer. This should be changed to use a stringpool if we switch
+ // to a map type that may move keys in memory.
+ std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);
+ auto I =
+ Deserializers.insert(Deserializers.begin(),
+ std::make_pair(std::move(Name),
+ std::move(Deserialize)));
+ KeyName = &I->first;
+ }
+
+ {
+ assert(KeyName != nullptr && "No keyname pointer");
+ std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
+ // FIXME: Move capture Serialize once we have C++14.
+ Serializers[ErrorInfoT::classID()] =
+ [KeyName, Serialize](ChannelT &C, const ErrorInfoBase &EIB) -> Error {
+ assert(EIB.dynamicClassID() == ErrorInfoT::classID() &&
+ "Serializer called for wrong error type");
+ if (auto Err = serializeSeq(C, *KeyName))
+ return Err;
+ return Serialize(C, static_cast<const ErrorInfoT &>(EIB));
+ };
+ }
+ }
+
+ static Error serialize(ChannelT &C, Error &&Err) {
+ std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
+
+ if (!Err)
+ return serializeSeq(C, std::string());
+
+ return handleErrors(std::move(Err),
+ [&C](const ErrorInfoBase &EIB) {
+ auto SI = Serializers.find(EIB.dynamicClassID());
+ if (SI == Serializers.end())
+ return serializeAsStringError(C, EIB);
+ return (SI->second)(C, EIB);
+ });
+ }
+
+ static Error deserialize(ChannelT &C, Error &Err) {
+ std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);
+
+ std::string Key;
+ if (auto Err = deserializeSeq(C, Key))
+ return Err;
+
+ if (Key.empty()) {
+ ErrorAsOutParameter EAO(&Err);
+ Err = Error::success();
+ return Error::success();
+ }
+
+ auto DI = Deserializers.find(Key);
+ assert(DI != Deserializers.end() && "No deserializer for error type");
+ return (DI->second)(C, Err);
+ }
+
+private:
+
+ static Error serializeAsStringError(ChannelT &C, const ErrorInfoBase &EIB) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ EIB.log(ErrMsgStream);
+ }
+ return serialize(C, make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode()));
+ }
+
+ static std::recursive_mutex SerializersMutex;
+ static std::recursive_mutex DeserializersMutex;
+ static std::map<const void*, WrappedErrorSerializer> Serializers;
+ static std::map<std::string, WrappedErrorDeserializer> Deserializers;
+};
+
+template <typename ChannelT>
+std::recursive_mutex SerializationTraits<ChannelT, Error>::SerializersMutex;
+
+template <typename ChannelT>
+std::recursive_mutex SerializationTraits<ChannelT, Error>::DeserializersMutex;
+
+template <typename ChannelT>
+std::map<const void*,
+ typename SerializationTraits<ChannelT, Error>::WrappedErrorSerializer>
+SerializationTraits<ChannelT, Error>::Serializers;
+
+template <typename ChannelT>
+std::map<std::string,
+ typename SerializationTraits<ChannelT, Error>::WrappedErrorDeserializer>
+SerializationTraits<ChannelT, Error>::Deserializers;
+
+/// Registers a serializer and deserializer for the given error type on the
+/// given channel type.
+template <typename ChannelT, typename ErrorInfoT, typename SerializeFtor,
+ typename DeserializeFtor>
+void registerErrorSerialization(std::string Name, SerializeFtor &&Serialize,
+ DeserializeFtor &&Deserialize) {
+ SerializationTraits<ChannelT, Error>::template registerErrorType<ErrorInfoT>(
+ std::move(Name),
+ std::forward<SerializeFtor>(Serialize),
+ std::forward<DeserializeFtor>(Deserialize));
+}
+
+/// Registers serialization/deserialization for StringError.
+template <typename ChannelT>
+void registerStringError() {
+ static bool AlreadyRegistered = false;
+ if (!AlreadyRegistered) {
+ registerErrorSerialization<ChannelT, StringError>(
+ "StringError",
+ [](ChannelT &C, const StringError &SE) {
+ return serializeSeq(C, SE.getMessage());
+ },
+ [](ChannelT &C, Error &Err) -> Error {
+ ErrorAsOutParameter EAO(&Err);
+ std::string Msg;
+ if (auto E2 = deserializeSeq(C, Msg))
+ return E2;
+ Err =
+ make_error<StringError>(std::move(Msg),
+ orcError(
+ OrcErrorCode::UnknownErrorCodeFromRemote));
+ return Error::success();
+ });
+ AlreadyRegistered = true;
+ }
+}
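+
+// A minimal usage sketch (illustrative only, not part of this header):
+// registering a hypothetical user error type 'MyError' that carries an
+// int32_t code, over a hypothetical channel type 'MyChannel'.
+//
+//   registerErrorSerialization<MyChannel, MyError>(
+//       "MyError",
+//       [](MyChannel &C, const MyError &E) {
+//         return serializeSeq(C, E.getCode());
+//       },
+//       [](MyChannel &C, Error &Err) -> Error {
+//         ErrorAsOutParameter EAO(&Err);
+//         int32_t Code;
+//         if (auto E2 = deserializeSeq(C, Code))
+//           return E2;
+//         Err = make_error<MyError>(Code);
+//         return Error::success();
+//       });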
+
+/// SerializationTraits for Expected<T1> from an Expected<T2>.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, Expected<T1>, Expected<T2>> {
+public:
+
+ static Error serialize(ChannelT &C, Expected<T2> &&ValOrErr) {
+ if (ValOrErr) {
+ if (auto Err = serializeSeq(C, true))
+ return Err;
+ return SerializationTraits<ChannelT, T1, T2>::serialize(C, *ValOrErr);
+ }
+ if (auto Err = serializeSeq(C, false))
+ return Err;
+ return serializeSeq(C, ValOrErr.takeError());
+ }
+
+ static Error deserialize(ChannelT &C, Expected<T2> &ValOrErr) {
+ ExpectedAsOutParameter<T2> EAO(&ValOrErr);
+ bool HasValue;
+ if (auto Err = deserializeSeq(C, HasValue))
+ return Err;
+ if (HasValue)
+ return SerializationTraits<ChannelT, T1, T2>::deserialize(C, *ValOrErr);
+ Error Err = Error::success();
+ if (auto E2 = deserializeSeq(C, Err))
+ return E2;
+ ValOrErr = std::move(Err);
+ return Error::success();
+ }
+};
+
+/// SerializationTraits for Expected<T1> from a T2.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, Expected<T1>, T2> {
+public:
+
+ static Error serialize(ChannelT &C, T2 &&Val) {
+ return serializeSeq(C, Expected<T2>(std::forward<T2>(Val)));
+ }
+};
+
+/// SerializationTraits for Expected<T1> from an Error.
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, Expected<T>, Error> {
+public:
+
+ static Error serialize(ChannelT &C, Error &&Err) {
+ return serializeSeq(C, Expected<T>(std::move(Err)));
+ }
+};
+
+/// SerializationTraits default specialization for std::pair.
+template <typename ChannelT, typename T1, typename T2>
+class SerializationTraits<ChannelT, std::pair<T1, T2>> {
+public:
+ static Error serialize(ChannelT &C, const std::pair<T1, T2> &V) {
+ return serializeSeq(C, V.first, V.second);
+ }
+
+ static Error deserialize(ChannelT &C, std::pair<T1, T2> &V) {
+ return deserializeSeq(C, V.first, V.second);
+ }
+};
+
+/// SerializationTraits default specialization for std::tuple.
+template <typename ChannelT, typename... ArgTs>
+class SerializationTraits<ChannelT, std::tuple<ArgTs...>> {
+public:
+
+ /// RPC channel serialization for std::tuple.
+ static Error serialize(ChannelT &C, const std::tuple<ArgTs...> &V) {
+ return serializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+ }
+
+ /// RPC channel deserialization for std::tuple.
+ static Error deserialize(ChannelT &C, std::tuple<ArgTs...> &V) {
+ return deserializeTupleHelper(C, V, llvm::index_sequence_for<ArgTs...>());
+ }
+
+private:
+ // Serialization helper for std::tuple.
+ template <size_t... Is>
+ static Error serializeTupleHelper(ChannelT &C, const std::tuple<ArgTs...> &V,
+ llvm::index_sequence<Is...> _) {
+ return serializeSeq(C, std::get<Is>(V)...);
+ }
+
+ // Serialization helper for std::tuple.
+ template <size_t... Is>
+ static Error deserializeTupleHelper(ChannelT &C, std::tuple<ArgTs...> &V,
+ llvm::index_sequence<Is...> _) {
+ return deserializeSeq(C, std::get<Is>(V)...);
+ }
+};
+
+/// SerializationTraits default specialization for std::vector.
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, std::vector<T>> {
+public:
+
+ /// Serialize a std::vector<T> from std::vector<T>.
+ static Error serialize(ChannelT &C, const std::vector<T> &V) {
+ if (auto Err = serializeSeq(C, static_cast<uint64_t>(V.size())))
+ return Err;
+
+ for (const auto &E : V)
+ if (auto Err = serializeSeq(C, E))
+ return Err;
+
+ return Error::success();
+ }
+
+ /// Deserialize a std::vector<T> to a std::vector<T>.
+ static Error deserialize(ChannelT &C, std::vector<T> &V) {
+ uint64_t Count = 0;
+ if (auto Err = deserializeSeq(C, Count))
+ return Err;
+
+ V.resize(Count);
+ for (auto &E : V)
+ if (auto Err = deserializeSeq(C, E))
+ return Err;
+
+ return Error::success();
+ }
+};
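+
+// Sketch of a user-provided specialization (illustrative only): serializing a
+// hypothetical 'struct Point { int32_t X, Y; };' by forwarding to its members.
+//
+//   template <typename ChannelT>
+//   class SerializationTraits<ChannelT, Point> {
+//   public:
+//     static Error serialize(ChannelT &C, const Point &P) {
+//       return serializeSeq(C, P.X, P.Y);
+//     }
+//     static Error deserialize(ChannelT &C, Point &P) {
+//       return deserializeSeq(C, P.X, P.Y);
+//     }
+//   };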
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RPCSERIALIZATION_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h
new file mode 100644
index 0000000..c278cb1
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RPCUtils.h
@@ -0,0 +1,1752 @@
+//===------- RPCUtils.h - Utilities for building RPC APIs -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities to support construction of simple RPC APIs.
+//
+// The RPC utilities aim for ease of use (minimal conceptual overhead) for C++
+// programmers, high performance, low memory overhead, and efficient use of the
+// communications channel.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RPCUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_RPCUTILS_H
+
+#include <map>
+#include <thread>
+#include <vector>
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
+
+#include <future>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+/// Base class of all fatal RPC errors (those that necessarily result in the
+/// termination of the RPC session).
+class RPCFatalError : public ErrorInfo<RPCFatalError> {
+public:
+ static char ID;
+};
+
+/// ConnectionClosed is returned from RPC operations if the RPC connection
+/// has already been closed due to either an error or graceful disconnection.
+class ConnectionClosed : public ErrorInfo<ConnectionClosed> {
+public:
+ static char ID;
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+};
+
+/// BadFunctionCall is returned from handleOne when the remote makes a call with
+/// an unrecognized function id.
+///
+/// This error is fatal because Orc RPC needs to know how to parse a function
+/// call to know where the next call starts, and if it doesn't recognize the
+/// function id it cannot parse the call.
+template <typename FnIdT, typename SeqNoT>
+class BadFunctionCall
+ : public ErrorInfo<BadFunctionCall<FnIdT, SeqNoT>, RPCFatalError> {
+public:
+ static char ID;
+
+ BadFunctionCall(FnIdT FnId, SeqNoT SeqNo)
+ : FnId(std::move(FnId)), SeqNo(std::move(SeqNo)) {}
+
+ std::error_code convertToErrorCode() const override {
+ return orcError(OrcErrorCode::UnexpectedRPCCall);
+ }
+
+ void log(raw_ostream &OS) const override {
+ OS << "Call to invalid RPC function id '" << FnId << "' with "
+ "sequence number " << SeqNo;
+ }
+
+private:
+ FnIdT FnId;
+ SeqNoT SeqNo;
+};
+
+template <typename FnIdT, typename SeqNoT>
+char BadFunctionCall<FnIdT, SeqNoT>::ID = 0;
+
+/// InvalidSequenceNumberForResponse is returned from handleOne when a response
+/// call arrives with a sequence number that doesn't correspond to any in-flight
+/// function call.
+///
+/// This error is fatal because Orc RPC needs to know how to parse the rest of
+/// the response call to know where the next call starts, and if it doesn't have
+/// a result parser for this sequence number it can't do that.
+template <typename SeqNoT>
+class InvalidSequenceNumberForResponse
+ : public ErrorInfo<InvalidSequenceNumberForResponse<SeqNoT>, RPCFatalError> {
+public:
+ static char ID;
+
+ InvalidSequenceNumberForResponse(SeqNoT SeqNo)
+ : SeqNo(std::move(SeqNo)) {}
+
+ std::error_code convertToErrorCode() const override {
+ return orcError(OrcErrorCode::UnexpectedRPCCall);
+  }
+
+ void log(raw_ostream &OS) const override {
+ OS << "Response has unknown sequence number " << SeqNo;
+ }
+private:
+ SeqNoT SeqNo;
+};
+
+template <typename SeqNoT>
+char InvalidSequenceNumberForResponse<SeqNoT>::ID = 0;
+
+/// This non-fatal error will be passed to asynchronous result handlers in place
+/// of a result if the connection goes down before a result returns, or if the
+/// function to be called cannot be negotiated with the remote.
+class ResponseAbandoned : public ErrorInfo<ResponseAbandoned> {
+public:
+ static char ID;
+
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+};
+
+/// This error is returned if the remote does not have a handler installed for
+/// the given RPC function.
+class CouldNotNegotiate : public ErrorInfo<CouldNotNegotiate> {
+public:
+ static char ID;
+
+ CouldNotNegotiate(std::string Signature);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const std::string &getSignature() const { return Signature; }
+private:
+ std::string Signature;
+};
+
+template <typename DerivedFunc, typename FnT> class Function;
+
+// RPC Function class.
+// DerivedFunc should be a user defined class with a static 'getName()' method
+// returning a const char* representing the function's name.
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+class Function<DerivedFunc, RetT(ArgTs...)> {
+public:
+ /// User defined function type.
+ using Type = RetT(ArgTs...);
+
+ /// Return type.
+ using ReturnType = RetT;
+
+ /// Returns the full function prototype as a string.
+ static const char *getPrototype() {
+ std::lock_guard<std::mutex> Lock(NameMutex);
+ if (Name.empty())
+ raw_string_ostream(Name)
+ << RPCTypeName<RetT>::getName() << " " << DerivedFunc::getName()
+ << "(" << llvm::orc::rpc::RPCTypeNameSequence<ArgTs...>() << ")";
+ return Name.data();
+ }
+
+private:
+ static std::mutex NameMutex;
+ static std::string Name;
+};
+
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::mutex Function<DerivedFunc, RetT(ArgTs...)>::NameMutex;
+
+template <typename DerivedFunc, typename RetT, typename... ArgTs>
+std::string Function<DerivedFunc, RetT(ArgTs...)>::Name;
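+
+// Illustrative sketch only: describing an RPC function with the Function
+// template. The name 'AddInts' is an assumption, not part of this header.
+//
+//   class AddInts : public Function<AddInts, int32_t(int32_t, int32_t)> {
+//   public:
+//     static const char *getName() { return "AddInts"; }
+//   };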
+
+/// Allocates RPC function ids during autonegotiation.
+/// Specializations of this class must provide four members:
+///
+/// static T getInvalidId():
+/// Should return a reserved id that will be used to represent missing
+/// functions during autonegotiation.
+///
+/// static T getResponseId():
+/// Should return a reserved id that will be used to send function responses
+/// (return values).
+///
+/// static T getNegotiateId():
+/// Should return a reserved id for the negotiate function, which will be used
+/// to negotiate ids for user defined functions.
+///
+/// template <typename Func> T allocate():
+/// Allocate a unique id for function Func.
+template <typename T, typename = void> class RPCFunctionIdAllocator;
+
+/// This specialization of RPCFunctionIdAllocator provides a default
+/// implementation for integral types.
+template <typename T>
+class RPCFunctionIdAllocator<
+ T, typename std::enable_if<std::is_integral<T>::value>::type> {
+public:
+ static T getInvalidId() { return T(0); }
+ static T getResponseId() { return T(1); }
+ static T getNegotiateId() { return T(2); }
+
+ template <typename Func> T allocate() { return NextId++; }
+
+private:
+ T NextId = 3;
+};
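+
+// Sketch of a custom allocator (illustrative only): a specialization for a
+// hypothetical string-based function-id type. The integral specialization
+// above is what the endpoints below use by default.
+//
+//   template <> class RPCFunctionIdAllocator<std::string> {
+//   public:
+//     static std::string getInvalidId() { return "<invalid>"; }
+//     static std::string getResponseId() { return "<response>"; }
+//     static std::string getNegotiateId() { return "<negotiate>"; }
+//     template <typename Func> std::string allocate() {
+//       return Func::getPrototype();
+//     }
+//   };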
+
+namespace detail {
+
+// FIXME: Remove MSVCPError/MSVCPExpected once MSVC's future implementation
+// supports classes without default constructors.
+#ifdef _MSC_VER
+
+namespace msvc_hacks {
+
+// Work around MSVC's future implementation's use of default constructors:
+// a default-constructed value in the promise will be overwritten when the
+// real error is set, so the default-constructed Error has to be marked as
+// checked up front.
+class MSVCPError : public Error {
+public:
+ MSVCPError() { (void)!!*this; }
+
+ MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}
+
+ MSVCPError &operator=(MSVCPError Other) {
+ Error::operator=(std::move(Other));
+ return *this;
+ }
+
+ MSVCPError(Error Err) : Error(std::move(Err)) {}
+};
+
+// Work around MSVC's future implementation, similar to MSVCPError.
+template <typename T> class MSVCPExpected : public Expected<T> {
+public:
+ MSVCPExpected()
+ : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
+ consumeError(this->takeError());
+ }
+
+ MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}
+
+ MSVCPExpected &operator=(MSVCPExpected &&Other) {
+ Expected<T>::operator=(std::move(Other));
+ return *this;
+ }
+
+ MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}
+
+ template <typename OtherT>
+ MSVCPExpected(
+ OtherT &&Val,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Val)) {}
+
+ template <class OtherT>
+ MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+
+ template <class OtherT>
+ explicit MSVCPExpected(
+ Expected<OtherT> &&Other,
+ typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+ nullptr)
+ : Expected<T>(std::move(Other)) {}
+};
+
+} // end namespace msvc_hacks
+
+#endif // _MSC_VER
+
+/// Provides a typedef for a tuple containing the decayed argument types.
+template <typename T> class FunctionArgsTuple;
+
+template <typename RetT, typename... ArgTs>
+class FunctionArgsTuple<RetT(ArgTs...)> {
+public:
+ using Type = std::tuple<typename std::decay<
+ typename std::remove_reference<ArgTs>::type>::type...>;
+};
+
+// ResultTraits provides typedefs and utilities specific to the return type
+// of functions.
+template <typename RetT> class ResultTraits {
+public:
+ // The return type wrapped in llvm::Expected.
+ using ErrorReturnType = Expected<RetT>;
+
+#ifdef _MSC_VER
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<msvc_hacks::MSVCPExpected<RetT>>;
+
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<msvc_hacks::MSVCPExpected<RetT>>;
+#else
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<ErrorReturnType>;
+
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
+
+ // Create a 'blank' value of the ErrorReturnType, ready and safe to
+ // overwrite.
+ static ErrorReturnType createBlankErrorReturnValue() {
+ return ErrorReturnType(RetT());
+ }
+
+ // Consume an abandoned ErrorReturnType.
+ static void consumeAbandoned(ErrorReturnType RetOrErr) {
+ consumeError(RetOrErr.takeError());
+ }
+};
+
+// ResultTraits specialization for void functions.
+template <> class ResultTraits<void> {
+public:
+ // For void functions, ErrorReturnType is llvm::Error.
+ using ErrorReturnType = Error;
+
+#ifdef _MSC_VER
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<msvc_hacks::MSVCPError>;
+
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<msvc_hacks::MSVCPError>;
+#else
+ // The ErrorReturnType wrapped in a std::promise.
+ using ReturnPromiseType = std::promise<ErrorReturnType>;
+
+ // The ErrorReturnType wrapped in a std::future.
+ using ReturnFutureType = std::future<ErrorReturnType>;
+#endif
+
+ // Create a 'blank' value of the ErrorReturnType, ready and safe to
+ // overwrite.
+ static ErrorReturnType createBlankErrorReturnValue() {
+ return ErrorReturnType::success();
+ }
+
+ // Consume an abandoned ErrorReturnType.
+ static void consumeAbandoned(ErrorReturnType Err) {
+ consumeError(std::move(Err));
+ }
+};
+
+// ResultTraits<Error> is equivalent to ResultTraits<void>. This allows
+// handlers for void RPC functions to return either void (in which case they
+// implicitly succeed) or Error (in which case their error return is
+// propagated). See usage in HandlerTraits::runHandlerHelper.
+template <> class ResultTraits<Error> : public ResultTraits<void> {};
+
+// ResultTraits<Expected<T>> is equivalent to ResultTraits<T>. This allows
+// handlers for RPC functions returning a T to return either a T (in which
+// case they implicitly succeed) or Expected<T> (in which case their error
+// return is propagated). See usage in HandlerTraits::runHandlerHelper.
+template <typename RetT>
+class ResultTraits<Expected<RetT>> : public ResultTraits<RetT> {};
+
+// Determines whether an RPC function's declared return type supports
+// returning errors (i.e. is Error or Expected<T>).
+template <typename T>
+class SupportsErrorReturn {
+public:
+ static const bool value = false;
+};
+
+template <>
+class SupportsErrorReturn<Error> {
+public:
+ static const bool value = true;
+};
+
+template <typename T>
+class SupportsErrorReturn<Expected<T>> {
+public:
+ static const bool value = true;
+};
+
+// RespondHelper packages return values based on whether or not the declared
+// RPC function return type supports error returns.
+template <bool FuncSupportsErrorReturn>
+class RespondHelper;
+
+// RespondHelper specialization for functions that support error returns.
+template <>
+class RespondHelper<true> {
+public:
+
+ // Send Expected<T>.
+ template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+ typename FunctionIdT, typename SequenceNumberT>
+ static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo,
+ Expected<HandlerRetT> ResultOrErr) {
+ if (!ResultOrErr && ResultOrErr.template errorIsA<RPCFatalError>())
+ return ResultOrErr.takeError();
+
+ // Open the response message.
+ if (auto Err = C.startSendMessage(ResponseId, SeqNo))
+ return Err;
+
+ // Serialize the result.
+ if (auto Err =
+ SerializationTraits<ChannelT, WireRetT,
+ Expected<HandlerRetT>>::serialize(
+ C, std::move(ResultOrErr)))
+ return Err;
+
+ // Close the response message.
+ return C.endSendMessage();
+ }
+
+ template <typename ChannelT, typename FunctionIdT, typename SequenceNumberT>
+ static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo, Error Err) {
+ if (Err && Err.isA<RPCFatalError>())
+ return Err;
+ if (auto Err2 = C.startSendMessage(ResponseId, SeqNo))
+ return Err2;
+ if (auto Err2 = serializeSeq(C, std::move(Err)))
+ return Err2;
+ return C.endSendMessage();
+ }
+
+};
+
+// RespondHelper specialization for functions that do not support error returns.
+template <>
+class RespondHelper<false> {
+public:
+
+ template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+ typename FunctionIdT, typename SequenceNumberT>
+ static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo,
+ Expected<HandlerRetT> ResultOrErr) {
+ if (auto Err = ResultOrErr.takeError())
+ return Err;
+
+ // Open the response message.
+ if (auto Err = C.startSendMessage(ResponseId, SeqNo))
+ return Err;
+
+ // Serialize the result.
+ if (auto Err =
+ SerializationTraits<ChannelT, WireRetT, HandlerRetT>::serialize(
+ C, *ResultOrErr))
+ return Err;
+
+ // Close the response message.
+ return C.endSendMessage();
+ }
+
+ template <typename ChannelT, typename FunctionIdT, typename SequenceNumberT>
+ static Error sendResult(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo, Error Err) {
+ if (Err)
+ return Err;
+ if (auto Err2 = C.startSendMessage(ResponseId, SeqNo))
+ return Err2;
+ return C.endSendMessage();
+ }
+
+};
+
+
+// Send a response of the given wire return type (WireRetT) over the
+// channel, with the given sequence number.
+template <typename WireRetT, typename HandlerRetT, typename ChannelT,
+ typename FunctionIdT, typename SequenceNumberT>
+Error respond(ChannelT &C, const FunctionIdT &ResponseId,
+ SequenceNumberT SeqNo, Expected<HandlerRetT> ResultOrErr) {
+ return RespondHelper<SupportsErrorReturn<WireRetT>::value>::
+ template sendResult<WireRetT>(C, ResponseId, SeqNo, std::move(ResultOrErr));
+}
+
+// Send an empty response message on the given channel to indicate that
+// the handler ran.
+template <typename WireRetT, typename ChannelT, typename FunctionIdT,
+ typename SequenceNumberT>
+Error respond(ChannelT &C, const FunctionIdT &ResponseId, SequenceNumberT SeqNo,
+ Error Err) {
+ return RespondHelper<SupportsErrorReturn<WireRetT>::value>::
+ sendResult(C, ResponseId, SeqNo, std::move(Err));
+}
+
+// Converts a given type to the equivalent error return type.
+template <typename T> class WrappedHandlerReturn {
+public:
+ using Type = Expected<T>;
+};
+
+template <typename T> class WrappedHandlerReturn<Expected<T>> {
+public:
+ using Type = Expected<T>;
+};
+
+template <> class WrappedHandlerReturn<void> {
+public:
+ using Type = Error;
+};
+
+template <> class WrappedHandlerReturn<Error> {
+public:
+ using Type = Error;
+};
+
+template <> class WrappedHandlerReturn<ErrorSuccess> {
+public:
+ using Type = Error;
+};
+
+// Traits class that strips the response function from the list of handler
+// arguments.
+template <typename FnT> class AsyncHandlerTraits;
+
+template <typename ResultT, typename... ArgTs>
+class AsyncHandlerTraits<Error(std::function<Error(Expected<ResultT>)>, ArgTs...)> {
+public:
+ using Type = Error(ArgTs...);
+ using ResultType = Expected<ResultT>;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<Error(std::function<Error(Error)>, ArgTs...)> {
+public:
+ using Type = Error(ArgTs...);
+ using ResultType = Error;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<ErrorSuccess(std::function<Error(Error)>, ArgTs...)> {
+public:
+ using Type = Error(ArgTs...);
+ using ResultType = Error;
+};
+
+template <typename... ArgTs>
+class AsyncHandlerTraits<void(std::function<Error(Error)>, ArgTs...)> {
+public:
+ using Type = Error(ArgTs...);
+ using ResultType = Error;
+};
+
+template <typename ResponseHandlerT, typename... ArgTs>
+class AsyncHandlerTraits<Error(ResponseHandlerT, ArgTs...)> :
+ public AsyncHandlerTraits<Error(typename std::decay<ResponseHandlerT>::type,
+ ArgTs...)> {};
+
+// This template class provides utilities related to RPC function handlers.
+// The base case applies to non-function types (the template class is
+// specialized for function types) and inherits from the appropriate
+// specialization for the given non-function type's call operator.
+template <typename HandlerT>
+class HandlerTraits : public HandlerTraits<decltype(
+ &std::remove_reference<HandlerT>::type::operator())> {
+};
+
+// Traits for handlers with a given function type.
+template <typename RetT, typename... ArgTs>
+class HandlerTraits<RetT(ArgTs...)> {
+public:
+ // Function type of the handler.
+ using Type = RetT(ArgTs...);
+
+ // Return type of the handler.
+ using ReturnType = RetT;
+
+ // Call the given handler with the given arguments.
+ template <typename HandlerT, typename... TArgTs>
+ static typename WrappedHandlerReturn<RetT>::Type
+ unpackAndRun(HandlerT &Handler, std::tuple<TArgTs...> &Args) {
+ return unpackAndRunHelper(Handler, Args,
+ llvm::index_sequence_for<TArgTs...>());
+ }
+
+ // Call the given handler with the given arguments.
+ template <typename HandlerT, typename ResponderT, typename... TArgTs>
+ static Error unpackAndRunAsync(HandlerT &Handler, ResponderT &Responder,
+ std::tuple<TArgTs...> &Args) {
+ return unpackAndRunAsyncHelper(Handler, Responder, Args,
+ llvm::index_sequence_for<TArgTs...>());
+ }
+
+ // Call the given handler with the given arguments.
+ template <typename HandlerT>
+ static typename std::enable_if<
+ std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+ Error>::type
+ run(HandlerT &Handler, ArgTs &&... Args) {
+ Handler(std::move(Args)...);
+ return Error::success();
+ }
+
+ template <typename HandlerT, typename... TArgTs>
+ static typename std::enable_if<
+ !std::is_void<typename HandlerTraits<HandlerT>::ReturnType>::value,
+ typename HandlerTraits<HandlerT>::ReturnType>::type
+ run(HandlerT &Handler, TArgTs... Args) {
+ return Handler(std::move(Args)...);
+ }
+
+ // Serialize arguments to the channel.
+ template <typename ChannelT, typename... CArgTs>
+ static Error serializeArgs(ChannelT &C, const CArgTs... CArgs) {
+ return SequenceSerialization<ChannelT, ArgTs...>::serialize(C, CArgs...);
+ }
+
+ // Deserialize arguments from the channel.
+ template <typename ChannelT, typename... CArgTs>
+ static Error deserializeArgs(ChannelT &C, std::tuple<CArgTs...> &Args) {
+ return deserializeArgsHelper(C, Args,
+ llvm::index_sequence_for<CArgTs...>());
+ }
+
+private:
+ template <typename ChannelT, typename... CArgTs, size_t... Indexes>
+ static Error deserializeArgsHelper(ChannelT &C, std::tuple<CArgTs...> &Args,
+ llvm::index_sequence<Indexes...> _) {
+ return SequenceSerialization<ChannelT, ArgTs...>::deserialize(
+ C, std::get<Indexes>(Args)...);
+ }
+
+ template <typename HandlerT, typename ArgTuple, size_t... Indexes>
+ static typename WrappedHandlerReturn<
+ typename HandlerTraits<HandlerT>::ReturnType>::Type
+ unpackAndRunHelper(HandlerT &Handler, ArgTuple &Args,
+ llvm::index_sequence<Indexes...>) {
+ return run(Handler, std::move(std::get<Indexes>(Args))...);
+ }
+
+
+ template <typename HandlerT, typename ResponderT, typename ArgTuple,
+ size_t... Indexes>
+ static typename WrappedHandlerReturn<
+ typename HandlerTraits<HandlerT>::ReturnType>::Type
+ unpackAndRunAsyncHelper(HandlerT &Handler, ResponderT &Responder,
+ ArgTuple &Args,
+ llvm::index_sequence<Indexes...>) {
+ return run(Handler, Responder, std::move(std::get<Indexes>(Args))...);
+ }
+};
+
+// Handler traits for free functions.
+template <typename RetT, typename... ArgTs>
+class HandlerTraits<RetT(*)(ArgTs...)>
+ : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Handler traits for class methods (especially call operators for lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...)>
+ : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Handler traits for const class methods (especially call operators for
+// lambdas).
+template <typename Class, typename RetT, typename... ArgTs>
+class HandlerTraits<RetT (Class::*)(ArgTs...) const>
+ : public HandlerTraits<RetT(ArgTs...)> {};
+
+// Utility to peel the Expected wrapper off a response handler error type.
+template <typename HandlerT> class ResponseHandlerArg;
+
+template <typename ArgT> class ResponseHandlerArg<Error(Expected<ArgT>)> {
+public:
+ using ArgType = Expected<ArgT>;
+ using UnwrappedArgType = ArgT;
+};
+
+template <typename ArgT>
+class ResponseHandlerArg<ErrorSuccess(Expected<ArgT>)> {
+public:
+ using ArgType = Expected<ArgT>;
+ using UnwrappedArgType = ArgT;
+};
+
+template <> class ResponseHandlerArg<Error(Error)> {
+public:
+ using ArgType = Error;
+};
+
+template <> class ResponseHandlerArg<ErrorSuccess(Error)> {
+public:
+ using ArgType = Error;
+};
+
+// ResponseHandler represents a handler for a not-yet-received function call
+// result.
+template <typename ChannelT> class ResponseHandler {
+public:
+ virtual ~ResponseHandler() {}
+
+ // Reads the function result off the wire and acts on it. The meaning of
+ // "act" will depend on how this method is implemented in any given
+ // ResponseHandler subclass but could, for example, mean running a
+ // user-specified handler or setting a promise value.
+ virtual Error handleResponse(ChannelT &C) = 0;
+
+ // Abandons this outstanding result.
+ virtual void abandon() = 0;
+
+ // Create an error instance representing an abandoned response.
+ static Error createAbandonedResponseError() {
+ return make_error<ResponseAbandoned>();
+ }
+};
+
+// ResponseHandler subclass for RPC functions with non-void returns.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+class ResponseHandlerImpl : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result by deserializing it from the channel then passing it
+ // to the user defined handler.
+ Error handleResponse(ChannelT &C) override {
+ using UnwrappedArgType = typename ResponseHandlerArg<
+ typename HandlerTraits<HandlerT>::Type>::UnwrappedArgType;
+ UnwrappedArgType Result;
+ if (auto Err =
+ SerializationTraits<ChannelT, FuncRetT,
+ UnwrappedArgType>::deserialize(C, Result))
+ return Err;
+ if (auto Err = C.endReceiveMessage())
+ return Err;
+ return Handler(std::move(Result));
+ }
+
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
+ }
+
+private:
+ HandlerT Handler;
+};
+
+// ResponseHandler subclass for RPC functions with void returns.
+template <typename ChannelT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, void, HandlerT>
+ : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result (no actual value, just a notification that the function
+ // has completed on the remote end) by calling the user-defined handler with
+ // Error::success().
+ Error handleResponse(ChannelT &C) override {
+ if (auto Err = C.endReceiveMessage())
+ return Err;
+ return Handler(Error::success());
+ }
+
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
+ }
+
+private:
+ HandlerT Handler;
+};
+
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, Expected<FuncRetT>, HandlerT>
+ : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result by deserializing it from the channel then passing it
+ // to the user defined handler.
+ Error handleResponse(ChannelT &C) override {
+ using HandlerArgType = typename ResponseHandlerArg<
+ typename HandlerTraits<HandlerT>::Type>::ArgType;
+ HandlerArgType Result((typename HandlerArgType::value_type()));
+
+ if (auto Err =
+ SerializationTraits<ChannelT, Expected<FuncRetT>,
+ HandlerArgType>::deserialize(C, Result))
+ return Err;
+ if (auto Err = C.endReceiveMessage())
+ return Err;
+ return Handler(std::move(Result));
+ }
+
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
+ }
+
+private:
+ HandlerT Handler;
+};
+
+template <typename ChannelT, typename HandlerT>
+class ResponseHandlerImpl<ChannelT, Error, HandlerT>
+ : public ResponseHandler<ChannelT> {
+public:
+ ResponseHandlerImpl(HandlerT Handler) : Handler(std::move(Handler)) {}
+
+ // Handle the result by deserializing it from the channel then passing it
+ // to the user defined handler.
+ Error handleResponse(ChannelT &C) override {
+ Error Result = Error::success();
+ if (auto Err =
+ SerializationTraits<ChannelT, Error, Error>::deserialize(C, Result))
+ return Err;
+ if (auto Err = C.endReceiveMessage())
+ return Err;
+ return Handler(std::move(Result));
+ }
+
+ // Abandon this response by calling the handler with an 'abandoned response'
+ // error.
+ void abandon() override {
+ if (auto Err = Handler(this->createAbandonedResponseError())) {
+ // Handlers should not fail when passed an abandoned response error.
+ report_fatal_error(std::move(Err));
+ }
+ }
+
+private:
+ HandlerT Handler;
+};
+
+// Create a ResponseHandler from a given user handler.
+template <typename ChannelT, typename FuncRetT, typename HandlerT>
+std::unique_ptr<ResponseHandler<ChannelT>> createResponseHandler(HandlerT H) {
+ return llvm::make_unique<ResponseHandlerImpl<ChannelT, FuncRetT, HandlerT>>(
+ std::move(H));
+}
+
+// Helper for wrapping member functions up as functors. This is useful for
+// installing methods as result handlers.
+template <typename ClassT, typename RetT, typename... ArgTs>
+class MemberFnWrapper {
+public:
+ using MethodT = RetT (ClassT::*)(ArgTs...);
+ MemberFnWrapper(ClassT &Instance, MethodT Method)
+ : Instance(Instance), Method(Method) {}
+ RetT operator()(ArgTs &&... Args) {
+ return (Instance.*Method)(std::move(Args)...);
+ }
+
+private:
+ ClassT &Instance;
+ MethodT Method;
+};
+
+// Helper that provides a Functor for deserializing arguments.
+template <typename... ArgTs> class ReadArgs {
+public:
+ Error operator()() { return Error::success(); }
+};
+
+template <typename ArgT, typename... ArgTs>
+class ReadArgs<ArgT, ArgTs...> : public ReadArgs<ArgTs...> {
+public:
+ ReadArgs(ArgT &Arg, ArgTs &... Args)
+ : ReadArgs<ArgTs...>(Args...), Arg(Arg) {}
+
+ Error operator()(ArgT &ArgVal, ArgTs &... ArgVals) {
+ this->Arg = std::move(ArgVal);
+ return ReadArgs<ArgTs...>::operator()(ArgVals...);
+ }
+
+private:
+ ArgT &Arg;
+};
+
+// Manage sequence numbers.
+template <typename SequenceNumberT> class SequenceNumberManager {
+public:
+ // Reset, making all sequence numbers available.
+ void reset() {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ NextSequenceNumber = 0;
+ FreeSequenceNumbers.clear();
+ }
+
+ // Get the next available sequence number. Will re-use numbers that have
+ // been released.
+ SequenceNumberT getSequenceNumber() {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ if (FreeSequenceNumbers.empty())
+ return NextSequenceNumber++;
+ auto SequenceNumber = FreeSequenceNumbers.back();
+ FreeSequenceNumbers.pop_back();
+ return SequenceNumber;
+ }
+
+ // Release a sequence number, making it available for re-use.
+ void releaseSequenceNumber(SequenceNumberT SequenceNumber) {
+ std::lock_guard<std::mutex> Lock(SeqNoLock);
+ FreeSequenceNumbers.push_back(SequenceNumber);
+ }
+
+private:
+ std::mutex SeqNoLock;
+ SequenceNumberT NextSequenceNumber = 0;
+ std::vector<SequenceNumberT> FreeSequenceNumbers;
+};
+
+// Checks that predicate P holds for each corresponding pair of type arguments
+// from the T1 and T2 tuples.
+template <template <class, class> class P, typename T1Tuple, typename T2Tuple>
+class RPCArgTypeCheckHelper;
+
+template <template <class, class> class P>
+class RPCArgTypeCheckHelper<P, std::tuple<>, std::tuple<>> {
+public:
+ static const bool value = true;
+};
+
+template <template <class, class> class P, typename T, typename... Ts,
+ typename U, typename... Us>
+class RPCArgTypeCheckHelper<P, std::tuple<T, Ts...>, std::tuple<U, Us...>> {
+public:
+ static const bool value =
+ P<T, U>::value &&
+ RPCArgTypeCheckHelper<P, std::tuple<Ts...>, std::tuple<Us...>>::value;
+};
+
+template <template <class, class> class P, typename T1Sig, typename T2Sig>
+class RPCArgTypeCheck {
+public:
+ using T1Tuple = typename FunctionArgsTuple<T1Sig>::Type;
+ using T2Tuple = typename FunctionArgsTuple<T2Sig>::Type;
+
+ static_assert(std::tuple_size<T1Tuple>::value >=
+ std::tuple_size<T2Tuple>::value,
+ "Too many arguments to RPC call");
+ static_assert(std::tuple_size<T1Tuple>::value <=
+ std::tuple_size<T2Tuple>::value,
+ "Too few arguments to RPC call");
+
+ static const bool value = RPCArgTypeCheckHelper<P, T1Tuple, T2Tuple>::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanSerialize {
+private:
+ using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+ template <typename T>
+ static std::true_type
+ check(typename std::enable_if<
+ std::is_same<decltype(T::serialize(std::declval<ChannelT &>(),
+ std::declval<const ConcreteT &>())),
+ Error>::value,
+ void *>::type);
+
+ template <typename> static std::false_type check(...);
+
+public:
+ static const bool value = decltype(check<S>(0))::value;
+};
+
+template <typename ChannelT, typename WireT, typename ConcreteT>
+class CanDeserialize {
+private:
+ using S = SerializationTraits<ChannelT, WireT, ConcreteT>;
+
+ template <typename T>
+ static std::true_type
+ check(typename std::enable_if<
+ std::is_same<decltype(T::deserialize(std::declval<ChannelT &>(),
+ std::declval<ConcreteT &>())),
+ Error>::value,
+ void *>::type);
+
+ template <typename> static std::false_type check(...);
+
+public:
+ static const bool value = decltype(check<S>(0))::value;
+};
+
+/// Contains primitive utilities for defining, calling and handling calls to
+/// remote procedures. ChannelT is a bidirectional stream conforming to the
+/// RPCChannel interface (see RPCChannel.h), FunctionIdT is a procedure
+/// identifier type that must be serializable on ChannelT, and SequenceNumberT
+/// is an integral type that will be used to number in-flight function calls.
+///
+/// These utilities support the construction of very primitive RPC utilities.
+/// Their intent is to ensure correct serialization and deserialization of
+/// procedure arguments, and to keep the client and server's view of the API in
+/// sync.
+template <typename ImplT, typename ChannelT, typename FunctionIdT,
+ typename SequenceNumberT>
+class RPCEndpointBase {
+protected:
+ class OrcRPCInvalid : public Function<OrcRPCInvalid, void()> {
+ public:
+ static const char *getName() { return "__orc_rpc$invalid"; }
+ };
+
+ class OrcRPCResponse : public Function<OrcRPCResponse, void()> {
+ public:
+ static const char *getName() { return "__orc_rpc$response"; }
+ };
+
+ class OrcRPCNegotiate
+ : public Function<OrcRPCNegotiate, FunctionIdT(std::string)> {
+ public:
+ static const char *getName() { return "__orc_rpc$negotiate"; }
+ };
+
+ // Helper predicate for testing for the presence of SerializeTraits
+ // serializers.
+ template <typename WireT, typename ConcreteT>
+ class CanSerializeCheck : detail::CanSerialize<ChannelT, WireT, ConcreteT> {
+ public:
+ using detail::CanSerialize<ChannelT, WireT, ConcreteT>::value;
+
+ static_assert(value, "Missing serializer for argument (Can't serialize the "
+ "first template type argument of CanSerializeCheck "
+ "from the second)");
+ };
+
+ // Helper predicate for testing for the presence of SerializeTraits
+ // deserializers.
+ template <typename WireT, typename ConcreteT>
+ class CanDeserializeCheck
+ : detail::CanDeserialize<ChannelT, WireT, ConcreteT> {
+ public:
+ using detail::CanDeserialize<ChannelT, WireT, ConcreteT>::value;
+
+ static_assert(value, "Missing deserializer for argument (Can't deserialize "
+ "the second template type argument of "
+ "CanDeserializeCheck from the first)");
+ };
+
+public:
+ /// Construct an RPC instance on a channel.
+ RPCEndpointBase(ChannelT &C, bool LazyAutoNegotiation)
+ : C(C), LazyAutoNegotiation(LazyAutoNegotiation) {
+ // Hold ResponseId in a special variable, since we expect Response to be
+ // called relatively frequently, and want to avoid the map lookup.
+ ResponseId = FnIdAllocator.getResponseId();
+ RemoteFunctionIds[OrcRPCResponse::getPrototype()] = ResponseId;
+
+ // Register the negotiate function id and handler.
+ auto NegotiateId = FnIdAllocator.getNegotiateId();
+ RemoteFunctionIds[OrcRPCNegotiate::getPrototype()] = NegotiateId;
+ Handlers[NegotiateId] = wrapHandler<OrcRPCNegotiate>(
+ [this](const std::string &Name) { return handleNegotiate(Name); });
+ }
+
+
+ /// Negotiate a function id for Func with the other end of the channel.
+ template <typename Func> Error negotiateFunction(bool Retry = false) {
+ return getRemoteFunctionId<Func>(true, Retry).takeError();
+ }
+
+  /// Append a call to Func; this does not call send on the channel.
+ /// The first argument specifies a user-defined handler to be run when the
+ /// function returns. The handler should take an Expected<Func::ReturnType>,
+ /// or an Error (if Func::ReturnType is void). The handler will be called
+ /// with an error if the return value is abandoned due to a channel error.
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error appendCallAsync(HandlerT Handler, const ArgTs &... Args) {
+
+ static_assert(
+ detail::RPCArgTypeCheck<CanSerializeCheck, typename Func::Type,
+ void(ArgTs...)>::value,
+ "");
+
+ // Look up the function ID.
+ FunctionIdT FnId;
+ if (auto FnIdOrErr = getRemoteFunctionId<Func>(LazyAutoNegotiation, false))
+ FnId = *FnIdOrErr;
+ else {
+ // Negotiation failed. Notify the handler then return the negotiate-failed
+ // error.
+ cantFail(Handler(make_error<ResponseAbandoned>()));
+ return FnIdOrErr.takeError();
+ }
+
+ SequenceNumberT SeqNo; // initialized in locked scope below.
+ {
+ // Lock the pending responses map and sequence number manager.
+ std::lock_guard<std::mutex> Lock(ResponsesMutex);
+
+ // Allocate a sequence number.
+ SeqNo = SequenceNumberMgr.getSequenceNumber();
+ assert(!PendingResponses.count(SeqNo) &&
+ "Sequence number already allocated");
+
+ // Install the user handler.
+ PendingResponses[SeqNo] =
+ detail::createResponseHandler<ChannelT, typename Func::ReturnType>(
+ std::move(Handler));
+ }
+
+ // Open the function call message.
+ if (auto Err = C.startSendMessage(FnId, SeqNo)) {
+ abandonPendingResponses();
+ return Err;
+ }
+
+ // Serialize the call arguments.
+ if (auto Err = detail::HandlerTraits<typename Func::Type>::serializeArgs(
+ C, Args...)) {
+ abandonPendingResponses();
+ return Err;
+ }
+
+    // Close the function call message.
+ if (auto Err = C.endSendMessage()) {
+ abandonPendingResponses();
+ return Err;
+ }
+
+ return Error::success();
+ }
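+
+  // Illustrative sketch (assumes the hypothetical 'AddInts' function and an
+  // endpoint 'EP'): since AddInts returns int32_t, the handler takes
+  // Expected<int32_t>.
+  //
+  //   if (auto Err = EP.appendCallAsync<AddInts>(
+  //           [](Expected<int32_t> R) -> Error {
+  //             if (!R)
+  //               return R.takeError();
+  //             // ... use *R ...
+  //             return Error::success();
+  //           },
+  //           1, 2))
+  //     /* handle append error */;
+  //   // Nothing goes out on the wire until sendAppendedCalls() (or callAsync,
+  //   // which sends) is used.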
+
+  Error sendAppendedCalls() { return C.send(); }
+
+ template <typename Func, typename HandlerT, typename... ArgTs>
+ Error callAsync(HandlerT Handler, const ArgTs &... Args) {
+ if (auto Err = appendCallAsync<Func>(std::move(Handler), Args...))
+ return Err;
+ return C.send();
+ }
+
+ /// Handle one incoming call.
+ Error handleOne() {
+ FunctionIdT FnId;
+ SequenceNumberT SeqNo;
+ if (auto Err = C.startReceiveMessage(FnId, SeqNo)) {
+ abandonPendingResponses();
+ return Err;
+ }
+ if (FnId == ResponseId)
+ return handleResponse(SeqNo);
+ auto I = Handlers.find(FnId);
+ if (I != Handlers.end())
+ return I->second(C, SeqNo);
+
+ // else: No handler found. Report error to client?
+ return make_error<BadFunctionCall<FunctionIdT, SequenceNumberT>>(FnId,
+ SeqNo);
+ }
+
+ /// Helper for handling setter procedures - this method returns a functor that
+ /// sets the variables referred to by Args... to values deserialized from the
+ /// channel.
+ /// E.g.
+ ///
+  ///   class Func1 : public Function<Func1, void(bool, int)> {
+  ///   public:
+  ///     static const char *getName() { return "Func1"; }
+  ///   };
+ ///
+ /// ...
+ /// bool B;
+ /// int I;
+ /// if (auto Err = expect<Func1>(Channel, readArgs(B, I)))
+ /// /* Handle Args */ ;
+ ///
+ template <typename... ArgTs>
+ static detail::ReadArgs<ArgTs...> readArgs(ArgTs &... Args) {
+ return detail::ReadArgs<ArgTs...>(Args...);
+ }
+
+ /// Abandon all outstanding result handlers.
+ ///
+ /// This will call all currently registered result handlers to receive an
+ /// "abandoned" error as their argument. This is used internally by the RPC
+ /// in error situations, but can also be called directly by clients who are
+ /// disconnecting from the remote and don't or can't expect responses to their
+ /// outstanding calls. (Especially for outstanding blocking calls, calling
+ /// this function may be necessary to avoid dead threads).
+ void abandonPendingResponses() {
+ // Lock the pending responses map and sequence number manager.
+ std::lock_guard<std::mutex> Lock(ResponsesMutex);
+
+ for (auto &KV : PendingResponses)
+ KV.second->abandon();
+ PendingResponses.clear();
+ SequenceNumberMgr.reset();
+ }
+
+ /// Remove the handler for the given function.
+ /// A handler must currently be registered for this function.
+ template <typename Func>
+ void removeHandler() {
+ auto IdItr = LocalFunctionIds.find(Func::getPrototype());
+ assert(IdItr != LocalFunctionIds.end() &&
+ "Function does not have a registered handler");
+ auto HandlerItr = Handlers.find(IdItr->second);
+ assert(HandlerItr != Handlers.end() &&
+ "Function does not have a registered handler");
+ Handlers.erase(HandlerItr);
+ }
+
+ /// Clear all handlers.
+ void clearHandlers() {
+ Handlers.clear();
+ }
+
+protected:
+
+ FunctionIdT getInvalidFunctionId() const {
+ return FnIdAllocator.getInvalidId();
+ }
+
+ /// Add the given handler to the handler map and make it available for
+ /// autonegotiation and execution.
+ template <typename Func, typename HandlerT>
+ void addHandlerImpl(HandlerT Handler) {
+
+ static_assert(detail::RPCArgTypeCheck<
+ CanDeserializeCheck, typename Func::Type,
+ typename detail::HandlerTraits<HandlerT>::Type>::value,
+ "");
+
+ FunctionIdT NewFnId = FnIdAllocator.template allocate<Func>();
+ LocalFunctionIds[Func::getPrototype()] = NewFnId;
+ Handlers[NewFnId] = wrapHandler<Func>(std::move(Handler));
+ }
+
+ template <typename Func, typename HandlerT>
+ void addAsyncHandlerImpl(HandlerT Handler) {
+
+ static_assert(detail::RPCArgTypeCheck<
+ CanDeserializeCheck, typename Func::Type,
+ typename detail::AsyncHandlerTraits<
+ typename detail::HandlerTraits<HandlerT>::Type
+ >::Type>::value,
+ "");
+
+ FunctionIdT NewFnId = FnIdAllocator.template allocate<Func>();
+ LocalFunctionIds[Func::getPrototype()] = NewFnId;
+ Handlers[NewFnId] = wrapAsyncHandler<Func>(std::move(Handler));
+ }
+
+ Error handleResponse(SequenceNumberT SeqNo) {
+ using Handler = typename decltype(PendingResponses)::mapped_type;
+ Handler PRHandler;
+
+ {
+ // Lock the pending responses map and sequence number manager.
+ std::unique_lock<std::mutex> Lock(ResponsesMutex);
+ auto I = PendingResponses.find(SeqNo);
+
+ if (I != PendingResponses.end()) {
+ PRHandler = std::move(I->second);
+ PendingResponses.erase(I);
+ SequenceNumberMgr.releaseSequenceNumber(SeqNo);
+ } else {
+ // Unlock the pending results map to prevent recursive lock.
+ Lock.unlock();
+ abandonPendingResponses();
+ return make_error<
+ InvalidSequenceNumberForResponse<SequenceNumberT>>(SeqNo);
+ }
+ }
+
+ assert(PRHandler &&
+ "If we didn't find a response handler we should have bailed out");
+
+ if (auto Err = PRHandler->handleResponse(C)) {
+ abandonPendingResponses();
+ return Err;
+ }
+
+ return Error::success();
+ }
+
+ FunctionIdT handleNegotiate(const std::string &Name) {
+ auto I = LocalFunctionIds.find(Name);
+ if (I == LocalFunctionIds.end())
+ return getInvalidFunctionId();
+ return I->second;
+ }
+
+ // Find the remote FunctionId for the given function.
+ template <typename Func>
+ Expected<FunctionIdT> getRemoteFunctionId(bool NegotiateIfNotInMap,
+ bool NegotiateIfInvalid) {
+ bool DoNegotiate;
+
+ // Check if we already have a function id...
+ auto I = RemoteFunctionIds.find(Func::getPrototype());
+ if (I != RemoteFunctionIds.end()) {
+ // If it's valid there's nothing left to do.
+ if (I->second != getInvalidFunctionId())
+ return I->second;
+ DoNegotiate = NegotiateIfInvalid;
+ } else
+ DoNegotiate = NegotiateIfNotInMap;
+
+ // We don't have a function id for Func yet, but we're allowed to try to
+ // negotiate one.
+ if (DoNegotiate) {
+ auto &Impl = static_cast<ImplT &>(*this);
+ if (auto RemoteIdOrErr =
+ Impl.template callB<OrcRPCNegotiate>(Func::getPrototype())) {
+ RemoteFunctionIds[Func::getPrototype()] = *RemoteIdOrErr;
+ if (*RemoteIdOrErr == getInvalidFunctionId())
+ return make_error<CouldNotNegotiate>(Func::getPrototype());
+ return *RemoteIdOrErr;
+ } else
+ return RemoteIdOrErr.takeError();
+ }
+
+ // No key was available in the map and we weren't allowed to try to
+ // negotiate one, so return an unknown function error.
+ return make_error<CouldNotNegotiate>(Func::getPrototype());
+ }
+
+ using WrappedHandlerFn = std::function<Error(ChannelT &, SequenceNumberT)>;
+
+ // Wrap the given user handler in the necessary argument-deserialization code,
+ // result-serialization code, and call to the launch policy (if present).
+ template <typename Func, typename HandlerT>
+ WrappedHandlerFn wrapHandler(HandlerT Handler) {
+ return [this, Handler](ChannelT &Channel,
+ SequenceNumberT SeqNo) mutable -> Error {
+ // Start by deserializing the arguments.
+ using ArgsTuple =
+ typename detail::FunctionArgsTuple<
+ typename detail::HandlerTraits<HandlerT>::Type>::Type;
+ auto Args = std::make_shared<ArgsTuple>();
+
+ if (auto Err =
+ detail::HandlerTraits<typename Func::Type>::deserializeArgs(
+ Channel, *Args))
+ return Err;
+
+ // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
+      // for Args. Void cast Args to work around this for now.
+ // FIXME: Remove this workaround once we can assume a working GCC version.
+ (void)Args;
+
+      // End receive message, unlocking the channel for reading.
+ if (auto Err = Channel.endReceiveMessage())
+ return Err;
+
+ using HTraits = detail::HandlerTraits<HandlerT>;
+ using FuncReturn = typename Func::ReturnType;
+ return detail::respond<FuncReturn>(Channel, ResponseId, SeqNo,
+ HTraits::unpackAndRun(Handler, *Args));
+ };
+ }
+
+ // Wrap the given user handler in the necessary argument-deserialization code,
+ // result-serialization code, and call to the launch policy (if present).
+ template <typename Func, typename HandlerT>
+ WrappedHandlerFn wrapAsyncHandler(HandlerT Handler) {
+ return [this, Handler](ChannelT &Channel,
+ SequenceNumberT SeqNo) mutable -> Error {
+ // Start by deserializing the arguments.
+ using AHTraits = detail::AsyncHandlerTraits<
+ typename detail::HandlerTraits<HandlerT>::Type>;
+ using ArgsTuple =
+ typename detail::FunctionArgsTuple<typename AHTraits::Type>::Type;
+ auto Args = std::make_shared<ArgsTuple>();
+
+ if (auto Err =
+ detail::HandlerTraits<typename Func::Type>::deserializeArgs(
+ Channel, *Args))
+ return Err;
+
+ // GCC 4.7 and 4.8 incorrectly issue a -Wunused-but-set-variable warning
+      // for Args. Void cast Args to work around this for now.
+ // FIXME: Remove this workaround once we can assume a working GCC version.
+ (void)Args;
+
+      // End receive message, unlocking the channel for reading.
+ if (auto Err = Channel.endReceiveMessage())
+ return Err;
+
+ using HTraits = detail::HandlerTraits<HandlerT>;
+ using FuncReturn = typename Func::ReturnType;
+ auto Responder =
+ [this, SeqNo](typename AHTraits::ResultType RetVal) -> Error {
+ return detail::respond<FuncReturn>(C, ResponseId, SeqNo,
+ std::move(RetVal));
+ };
+
+ return HTraits::unpackAndRunAsync(Handler, Responder, *Args);
+ };
+ }
+
+ ChannelT &C;
+
+ bool LazyAutoNegotiation;
+
+ RPCFunctionIdAllocator<FunctionIdT> FnIdAllocator;
+
+ FunctionIdT ResponseId;
+ std::map<std::string, FunctionIdT> LocalFunctionIds;
+ std::map<const char *, FunctionIdT> RemoteFunctionIds;
+
+ std::map<FunctionIdT, WrappedHandlerFn> Handlers;
+
+ std::mutex ResponsesMutex;
+ detail::SequenceNumberManager<SequenceNumberT> SequenceNumberMgr;
+ std::map<SequenceNumberT, std::unique_ptr<detail::ResponseHandler<ChannelT>>>
+ PendingResponses;
+};
+
+} // end namespace detail
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+ typename SequenceNumberT = uint32_t>
+class MultiThreadedRPCEndpoint
+ : public detail::RPCEndpointBase<
+ MultiThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT> {
+private:
+ using BaseClass =
+ detail::RPCEndpointBase<
+ MultiThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT>;
+
+public:
+ MultiThreadedRPCEndpoint(ChannelT &C, bool LazyAutoNegotiation)
+ : BaseClass(C, LazyAutoNegotiation) {}
+
+ /// Add a handler for the given RPC function.
+ /// This installs the given handler functor for the given RPC Function, and
+ /// makes the RPC function available for negotiation/calling from the remote.
+ template <typename Func, typename HandlerT>
+ void addHandler(HandlerT Handler) {
+ return this->template addHandlerImpl<Func>(std::move(Handler));
+ }
+
+ /// Add a class-method as a handler.
+ template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+ void addHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+ addHandler<Func>(
+ detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+ }
+
+ template <typename Func, typename HandlerT>
+ void addAsyncHandler(HandlerT Handler) {
+ return this->template addAsyncHandlerImpl<Func>(std::move(Handler));
+ }
+
+ /// Add a class-method as a handler.
+ template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+ void addAsyncHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+ addAsyncHandler<Func>(
+ detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+ }
+
+ /// Return type for non-blocking call primitives.
+ template <typename Func>
+ using NonBlockingCallResult = typename detail::ResultTraits<
+ typename Func::ReturnType>::ReturnFutureType;
+
+  /// Call Func on Channel C. Does not block, does not call send. Returns a
+  /// future for the result.
+  ///
+  /// The returned future becomes ready once the corresponding response has
+  /// been read and handled (e.g. by a thread running handleOne/handlerLoop).
+ template <typename Func, typename... ArgTs>
+ Expected<NonBlockingCallResult<Func>> appendCallNB(const ArgTs &... Args) {
+ using RTraits = detail::ResultTraits<typename Func::ReturnType>;
+ using ErrorReturn = typename RTraits::ErrorReturnType;
+ using ErrorReturnPromise = typename RTraits::ReturnPromiseType;
+
+ // FIXME: Stack allocate and move this into the handler once LLVM builds
+ // with C++14.
+ auto Promise = std::make_shared<ErrorReturnPromise>();
+ auto FutureResult = Promise->get_future();
+
+ if (auto Err = this->template appendCallAsync<Func>(
+ [Promise](ErrorReturn RetOrErr) {
+ Promise->set_value(std::move(RetOrErr));
+ return Error::success();
+ },
+ Args...)) {
+ RTraits::consumeAbandoned(FutureResult.get());
+ return std::move(Err);
+ }
+ return std::move(FutureResult);
+ }
+
+  /// The same as appendCallNB, except that it calls C.send() to
+ /// flush the channel after serializing the call.
+ template <typename Func, typename... ArgTs>
+ Expected<NonBlockingCallResult<Func>> callNB(const ArgTs &... Args) {
+ auto Result = appendCallNB<Func>(Args...);
+ if (!Result)
+ return Result;
+ if (auto Err = this->C.send()) {
+ this->abandonPendingResponses();
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result->get()));
+ return std::move(Err);
+ }
+ return Result;
+ }
+
+ /// Call Func on Channel C. Blocks waiting for a result. Returns an Error
+ /// for void functions or an Expected<T> for functions returning a T.
+ ///
+ /// This function is for use in threaded code where another thread is
+ /// handling responses and incoming calls.
+ template <typename Func, typename... ArgTs,
+ typename AltRetT = typename Func::ReturnType>
+ typename detail::ResultTraits<AltRetT>::ErrorReturnType
+ callB(const ArgTs &... Args) {
+ if (auto FutureResOrErr = callNB<Func>(Args...))
+ return FutureResOrErr->get();
+ else
+ return FutureResOrErr.takeError();
+ }
+
+ /// Handle incoming RPC calls.
+ Error handlerLoop() {
+ while (true)
+ if (auto Err = this->handleOne())
+ return Err;
+ return Error::success();
+ }
+};
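+
+// A minimal end-to-end sketch (illustrative only; assumes a channel type
+// 'MyChannel' and the hypothetical 'AddInts' function from the earlier
+// sketch, neither of which is defined in this header).
+//
+//   MyChannel ClientC, ServerC;
+//   MultiThreadedRPCEndpoint<MyChannel> Client(ClientC, true);
+//   MultiThreadedRPCEndpoint<MyChannel> Server(ServerC, true);
+//
+//   // Server side (typically on its own thread): install a handler and
+//   // serve incoming calls.
+//   Server.addHandler<AddInts>(
+//       [](int32_t X, int32_t Y) -> int32_t { return X + Y; });
+//   std::thread ServerThread([&]() { consumeError(Server.handlerLoop()); });
+//
+//   // Client side: callB blocks for the result, so a second client thread
+//   // must be running Client.handleOne()/handlerLoop() to process responses.
+//   Expected<int32_t> Sum = Client.callB<AddInts>(1, 2);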
+
+template <typename ChannelT, typename FunctionIdT = uint32_t,
+ typename SequenceNumberT = uint32_t>
+class SingleThreadedRPCEndpoint
+ : public detail::RPCEndpointBase<
+ SingleThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT> {
+private:
+ using BaseClass =
+ detail::RPCEndpointBase<
+ SingleThreadedRPCEndpoint<ChannelT, FunctionIdT, SequenceNumberT>,
+ ChannelT, FunctionIdT, SequenceNumberT>;
+
+public:
+ SingleThreadedRPCEndpoint(ChannelT &C, bool LazyAutoNegotiation)
+ : BaseClass(C, LazyAutoNegotiation) {}
+
+ template <typename Func, typename HandlerT>
+ void addHandler(HandlerT Handler) {
+ return this->template addHandlerImpl<Func>(std::move(Handler));
+ }
+
+ template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+ void addHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+ addHandler<Func>(
+ detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+ }
+
+ template <typename Func, typename HandlerT>
+ void addAsyncHandler(HandlerT Handler) {
+ return this->template addAsyncHandlerImpl<Func>(std::move(Handler));
+ }
+
+ /// Add a class-method as a handler.
+ template <typename Func, typename ClassT, typename RetT, typename... ArgTs>
+ void addAsyncHandler(ClassT &Object, RetT (ClassT::*Method)(ArgTs...)) {
+ addAsyncHandler<Func>(
+ detail::MemberFnWrapper<ClassT, RetT, ArgTs...>(Object, Method));
+ }
+
+ template <typename Func, typename... ArgTs,
+ typename AltRetT = typename Func::ReturnType>
+ typename detail::ResultTraits<AltRetT>::ErrorReturnType
+ callB(const ArgTs &... Args) {
+ bool ReceivedResponse = false;
+ using ResultType = typename detail::ResultTraits<AltRetT>::ErrorReturnType;
+ auto Result = detail::ResultTraits<AltRetT>::createBlankErrorReturnValue();
+
+ // We have to 'Check' result (which we know is in a success state at this
+ // point) so that it can be overwritten in the async handler.
+ (void)!!Result;
+
+ if (auto Err = this->template appendCallAsync<Func>(
+ [&](ResultType R) {
+ Result = std::move(R);
+ ReceivedResponse = true;
+ return Error::success();
+ },
+ Args...)) {
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result));
+ return std::move(Err);
+ }
+
+ while (!ReceivedResponse) {
+ if (auto Err = this->handleOne()) {
+ detail::ResultTraits<typename Func::ReturnType>::consumeAbandoned(
+ std::move(Result));
+ return std::move(Err);
+ }
+ }
+
+ return Result;
+ }
+};
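+
+// Illustrative usage sketch (not part of the original header; MyChannel, C,
+// C2 and the Add function are hypothetical). RPC functions are declared by
+// deriving from rpc::Function, handlers are registered with addHandler, and
+// callB makes a blocking call, servicing the channel until the response
+// arrives:
+//
+//   class Add : public rpc::Function<Add, int32_t(int32_t, int32_t)> {
+//   public:
+//     static const char *getName() { return "Add"; }
+//   };
+//
+//   SingleThreadedRPCEndpoint<MyChannel> Server(C, true);
+//   Server.addHandler<Add>([](int32_t X, int32_t Y) { return X + Y; });
+//
+//   SingleThreadedRPCEndpoint<MyChannel> Client(C2, true);
+//   if (auto SumOrErr = Client.callB<Add>(21, 21))
+//     ; // *SumOrErr == 42
+//   else
+//     consumeError(SumOrErr.takeError());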
+
+/// Asynchronous dispatch for a function on an RPC endpoint.
+template <typename RPCClass, typename Func>
+class RPCAsyncDispatch {
+public:
+ RPCAsyncDispatch(RPCClass &Endpoint) : Endpoint(Endpoint) {}
+
+ template <typename HandlerT, typename... ArgTs>
+ Error operator()(HandlerT Handler, const ArgTs &... Args) const {
+ return Endpoint.template appendCallAsync<Func>(std::move(Handler), Args...);
+ }
+
+private:
+ RPCClass &Endpoint;
+};
+
+/// Construct an asynchronous dispatcher from an RPC endpoint and a Func.
+template <typename Func, typename RPCEndpointT>
+RPCAsyncDispatch<RPCEndpointT, Func> rpcAsyncDispatch(RPCEndpointT &Endpoint) {
+ return RPCAsyncDispatch<RPCEndpointT, Func>(Endpoint);
+}
+
+/// \brief Allows a set of asynchronous calls to be dispatched, and then
+/// waited on as a group.
+class ParallelCallGroup {
+public:
+
+ ParallelCallGroup() = default;
+ ParallelCallGroup(const ParallelCallGroup &) = delete;
+ ParallelCallGroup &operator=(const ParallelCallGroup &) = delete;
+
+  /// \brief Make an asynchronous call.
+ template <typename AsyncDispatcher, typename HandlerT, typename... ArgTs>
+ Error call(const AsyncDispatcher &AsyncDispatch, HandlerT Handler,
+ const ArgTs &... Args) {
+ // Increment the count of outstanding calls. This has to happen before
+ // we invoke the call, as the handler may (depending on scheduling)
+ // be run immediately on another thread, and we don't want the decrement
+ // in the wrapped handler below to run before the increment.
+ {
+ std::unique_lock<std::mutex> Lock(M);
+ ++NumOutstandingCalls;
+ }
+
+ // Wrap the user handler in a lambda that will decrement the
+ // outstanding calls count, then poke the condition variable.
+ using ArgType = typename detail::ResponseHandlerArg<
+ typename detail::HandlerTraits<HandlerT>::Type>::ArgType;
+ // FIXME: Move handler into wrapped handler once we have C++14.
+ auto WrappedHandler = [this, Handler](ArgType Arg) {
+ auto Err = Handler(std::move(Arg));
+ std::unique_lock<std::mutex> Lock(M);
+ --NumOutstandingCalls;
+ CV.notify_all();
+ return Err;
+ };
+
+ return AsyncDispatch(std::move(WrappedHandler), Args...);
+ }
+
+ /// \brief Blocks until all calls have been completed and their return value
+ /// handlers run.
+ void wait() {
+ std::unique_lock<std::mutex> Lock(M);
+ while (NumOutstandingCalls > 0)
+ CV.wait(Lock);
+ }
+
+private:
+ std::mutex M;
+ std::condition_variable CV;
+ uint32_t NumOutstandingCalls = 0;
+};
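+
+// Illustrative usage sketch (not part of the original header; Endpoint and
+// the Add function are hypothetical, and another thread is assumed to be
+// servicing responses). Several asynchronous calls can be issued through one
+// ParallelCallGroup and then waited on together:
+//
+//   ParallelCallGroup PCG;
+//   int32_t Results[2];
+//   auto AsyncAdd = rpcAsyncDispatch<Add>(Endpoint);
+//
+//   for (int32_t I = 0; I != 2; ++I)
+//     if (auto Err = PCG.call(AsyncAdd,
+//                             [I, &Results](Expected<int32_t> R) -> Error {
+//                               if (!R)
+//                                 return R.takeError();
+//                               Results[I] = *R;
+//                               return Error::success();
+//                             },
+//                             I, I))
+//       return Err;
+//
+//   PCG.wait(); // Blocks until both response handlers have run.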
+
+/// @brief Convenience class for grouping RPC Functions into APIs that can be
+/// negotiated as a block.
+///
+template <typename... Funcs>
+class APICalls {
+public:
+
+ /// @brief Test whether this API contains Function F.
+ template <typename F>
+ class Contains {
+ public:
+ static const bool value = false;
+ };
+
+ /// @brief Negotiate all functions in this API.
+ template <typename RPCEndpoint>
+ static Error negotiate(RPCEndpoint &R) {
+ return Error::success();
+ }
+};
+
+template <typename Func, typename... Funcs>
+class APICalls<Func, Funcs...> {
+public:
+
+ template <typename F>
+ class Contains {
+ public:
+ static const bool value = std::is_same<F, Func>::value |
+ APICalls<Funcs...>::template Contains<F>::value;
+ };
+
+ template <typename RPCEndpoint>
+ static Error negotiate(RPCEndpoint &R) {
+ if (auto Err = R.template negotiateFunction<Func>())
+ return Err;
+ return APICalls<Funcs...>::negotiate(R);
+ }
+
+};
+
+template <typename... InnerFuncs, typename... Funcs>
+class APICalls<APICalls<InnerFuncs...>, Funcs...> {
+public:
+
+ template <typename F>
+ class Contains {
+ public:
+ static const bool value =
+ APICalls<InnerFuncs...>::template Contains<F>::value |
+ APICalls<Funcs...>::template Contains<F>::value;
+ };
+
+ template <typename RPCEndpoint>
+ static Error negotiate(RPCEndpoint &R) {
+ if (auto Err = APICalls<InnerFuncs...>::negotiate(R))
+ return Err;
+ return APICalls<Funcs...>::negotiate(R);
+ }
+
+};
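+
+// Illustrative sketch (not part of the original header; Add, Sub and Endpoint
+// are hypothetical): related rpc::Functions can be grouped into an API and
+// negotiated with the remote in a single step:
+//
+//   using MathAPI = APICalls<Add, Sub>;
+//
+//   static_assert(MathAPI::Contains<Add>::value, "Add is part of MathAPI");
+//
+//   if (auto Err = MathAPI::negotiate(Endpoint))
+//     return Err;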
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
new file mode 100644
index 0000000..8f0d9fa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -0,0 +1,349 @@
+//===- RTDyldObjectLinkingLayer.h - RTDyld-based jit linking ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for an RTDyld-based, in-process object linking layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Legacy.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+class RTDyldObjectLinkingLayerBase {
+public:
+ using ObjectPtr = std::unique_ptr<MemoryBuffer>;
+
+protected:
+
+ /// @brief Holds an object to be allocated/linked as a unit in the JIT.
+ ///
+ /// An instance of this class will be created for each object added
+  /// via RTDyldObjectLinkingLayer::addObject. Deleting the instance (via
+ /// removeObject) frees its memory, removing all symbol definitions that
+ /// had been provided by this instance. Higher level layers are responsible
+ /// for taking any action required to handle the missing symbols.
+ class LinkedObject {
+ public:
+ LinkedObject() = default;
+ LinkedObject(const LinkedObject&) = delete;
+ void operator=(const LinkedObject&) = delete;
+ virtual ~LinkedObject() = default;
+
+ virtual Error finalize() = 0;
+
+ virtual JITSymbol::GetAddressFtor
+ getSymbolMaterializer(std::string Name) = 0;
+
+ virtual void mapSectionAddress(const void *LocalAddress,
+ JITTargetAddress TargetAddr) const = 0;
+
+ JITSymbol getSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ auto SymEntry = SymbolTable.find(Name);
+ if (SymEntry == SymbolTable.end())
+ return nullptr;
+ if (!SymEntry->second.getFlags().isExported() && ExportedSymbolsOnly)
+ return nullptr;
+ if (!Finalized)
+ return JITSymbol(getSymbolMaterializer(Name),
+ SymEntry->second.getFlags());
+ return JITSymbol(SymEntry->second);
+ }
+
+ protected:
+ StringMap<JITEvaluatedSymbol> SymbolTable;
+ bool Finalized = false;
+ };
+};
+
+/// @brief Bare bones object linking layer.
+///
+/// This class is intended to be used as the base layer for a JIT. It allows
+/// object files to be loaded into memory, linked, and the addresses of their
+/// symbols queried. All objects added to this layer can see each other's
+/// symbols.
+class RTDyldObjectLinkingLayer : public RTDyldObjectLinkingLayerBase {
+public:
+
+ using RTDyldObjectLinkingLayerBase::ObjectPtr;
+
+ /// @brief Functor for receiving object-loaded notifications.
+ using NotifyLoadedFtor =
+ std::function<void(VModuleKey, const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &)>;
+
+ /// @brief Functor for receiving finalization notifications.
+ using NotifyFinalizedFtor = std::function<void(VModuleKey)>;
+
+private:
+ using OwnedObject = object::OwningBinary<object::ObjectFile>;
+
+ template <typename MemoryManagerPtrT>
+ class ConcreteLinkedObject : public LinkedObject {
+ public:
+ ConcreteLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+ OwnedObject Obj, MemoryManagerPtrT MemMgr,
+ std::shared_ptr<SymbolResolver> Resolver,
+ bool ProcessAllSections)
+ : MemMgr(std::move(MemMgr)),
+ PFC(llvm::make_unique<PreFinalizeContents>(
+ Parent, std::move(K), std::move(Obj), std::move(Resolver),
+ ProcessAllSections)) {
+ buildInitialSymbolTable(PFC->Obj);
+ }
+
+ ~ConcreteLinkedObject() override {
+ MemMgr->deregisterEHFrames();
+ }
+
+ Error finalize() override {
+ assert(PFC && "mapSectionAddress called on finalized LinkedObject");
+
+ JITSymbolResolverAdapter ResolverAdapter(PFC->Parent.ES, *PFC->Resolver);
+ PFC->RTDyld = llvm::make_unique<RuntimeDyld>(*MemMgr, ResolverAdapter);
+ PFC->RTDyld->setProcessAllSections(PFC->ProcessAllSections);
+
+ Finalized = true;
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info =
+ PFC->RTDyld->loadObject(*PFC->Obj.getBinary());
+
+ // Copy the symbol table out of the RuntimeDyld instance.
+ {
+ auto SymTab = PFC->RTDyld->getSymbolTable();
+ for (auto &KV : SymTab)
+ SymbolTable[KV.first] = KV.second;
+ }
+
+ if (PFC->Parent.NotifyLoaded)
+ PFC->Parent.NotifyLoaded(PFC->K, *PFC->Obj.getBinary(), *Info);
+
+ PFC->RTDyld->finalizeWithMemoryManagerLocking();
+
+ if (PFC->RTDyld->hasError())
+ return make_error<StringError>(PFC->RTDyld->getErrorString(),
+ inconvertibleErrorCode());
+
+ if (PFC->Parent.NotifyFinalized)
+ PFC->Parent.NotifyFinalized(PFC->K);
+
+ // Release resources.
+ PFC = nullptr;
+ return Error::success();
+ }
+
+ JITSymbol::GetAddressFtor getSymbolMaterializer(std::string Name) override {
+ return [this, Name]() -> Expected<JITTargetAddress> {
+ // The symbol may be materialized between the creation of this lambda
+ // and its execution, so we need to double check.
+ if (!this->Finalized)
+ if (auto Err = this->finalize())
+ return std::move(Err);
+ return this->getSymbol(Name, false).getAddress();
+ };
+ }
+
+ void mapSectionAddress(const void *LocalAddress,
+ JITTargetAddress TargetAddr) const override {
+ assert(PFC && "mapSectionAddress called on finalized LinkedObject");
+ assert(PFC->RTDyld && "mapSectionAddress called on raw LinkedObject");
+ PFC->RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ private:
+ void buildInitialSymbolTable(const OwnedObject &Obj) {
+ for (auto &Symbol : Obj.getBinary()->symbols()) {
+ if (Symbol.getFlags() & object::SymbolRef::SF_Undefined)
+ continue;
+ Expected<StringRef> SymbolName = Symbol.getName();
+ // FIXME: Raise an error for bad symbols.
+ if (!SymbolName) {
+ consumeError(SymbolName.takeError());
+ continue;
+ }
+ auto Flags = JITSymbolFlags::fromObjectSymbol(Symbol);
+ SymbolTable.insert(
+ std::make_pair(*SymbolName, JITEvaluatedSymbol(0, Flags)));
+ }
+ }
+
+ // Contains the information needed prior to finalization: the object files,
+ // memory manager, resolver, and flags needed for RuntimeDyld.
+ struct PreFinalizeContents {
+ PreFinalizeContents(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+ OwnedObject Obj,
+ std::shared_ptr<SymbolResolver> Resolver,
+ bool ProcessAllSections)
+ : Parent(Parent), K(std::move(K)), Obj(std::move(Obj)),
+ Resolver(std::move(Resolver)),
+ ProcessAllSections(ProcessAllSections) {}
+
+ RTDyldObjectLinkingLayer &Parent;
+ VModuleKey K;
+ OwnedObject Obj;
+ std::shared_ptr<SymbolResolver> Resolver;
+ bool ProcessAllSections;
+ std::unique_ptr<RuntimeDyld> RTDyld;
+ };
+
+ MemoryManagerPtrT MemMgr;
+ std::unique_ptr<PreFinalizeContents> PFC;
+ };
+
+ template <typename MemoryManagerPtrT>
+ std::unique_ptr<ConcreteLinkedObject<MemoryManagerPtrT>>
+ createLinkedObject(RTDyldObjectLinkingLayer &Parent, VModuleKey K,
+ OwnedObject Obj, MemoryManagerPtrT MemMgr,
+ std::shared_ptr<SymbolResolver> Resolver,
+ bool ProcessAllSections) {
+ using LOS = ConcreteLinkedObject<MemoryManagerPtrT>;
+ return llvm::make_unique<LOS>(Parent, std::move(K), std::move(Obj),
+ std::move(MemMgr), std::move(Resolver),
+ ProcessAllSections);
+ }
+
+public:
+ struct Resources {
+ std::shared_ptr<RuntimeDyld::MemoryManager> MemMgr;
+ std::shared_ptr<SymbolResolver> Resolver;
+ };
+
+ using ResourcesGetter = std::function<Resources(VModuleKey)>;
+
+ /// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
+ /// and NotifyFinalized functors.
+ RTDyldObjectLinkingLayer(
+ ExecutionSession &ES, ResourcesGetter GetResources,
+ NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
+ NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
+ : ES(ES), GetResources(std::move(GetResources)),
+ NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)), ProcessAllSections(false) {
+ }
+
+ /// @brief Set the 'ProcessAllSections' flag.
+ ///
+ /// If set to true, all sections in each object file will be allocated using
+ /// the memory manager, rather than just the sections required for execution.
+ ///
+ /// This is kludgy, and may be removed in the future.
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ /// @brief Add an object to the JIT.
+ Error addObject(VModuleKey K, ObjectPtr ObjBuffer) {
+
+ auto Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+ if (!Obj)
+ return Obj.takeError();
+
+ assert(!LinkedObjects.count(K) && "VModuleKey already in use");
+
+ auto R = GetResources(K);
+
+ LinkedObjects[K] = createLinkedObject(
+ *this, K, OwnedObject(std::move(*Obj), std::move(ObjBuffer)),
+ std::move(R.MemMgr), std::move(R.Resolver), ProcessAllSections);
+
+ return Error::success();
+ }
+
+ /// @brief Remove the object associated with VModuleKey K.
+ ///
+ /// All memory allocated for the object will be freed, and the sections and
+ /// symbols it provided will no longer be available. No attempt is made to
+ /// re-emit the missing symbols, and any use of these symbols (directly or
+ /// indirectly) will result in undefined behavior. If dependence tracking is
+ /// required to detect or resolve such issues it should be added at a higher
+ /// layer.
+ Error removeObject(VModuleKey K) {
+ assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+    // How do we invalidate the symbols provided by K?
+ LinkedObjects.erase(K);
+ return Error::success();
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ for (auto &KV : LinkedObjects)
+ if (auto Sym = KV.second->getSymbol(Name, ExportedSymbolsOnly))
+ return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+
+ return nullptr;
+ }
+
+ /// @brief Search for the given named symbol in the context of the loaded
+ /// object represented by the VModuleKey K.
+ /// @param K The VModuleKey for the object to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given object.
+ JITSymbol findSymbolIn(VModuleKey K, StringRef Name,
+ bool ExportedSymbolsOnly) {
+ assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+ return LinkedObjects[K]->getSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Map section addresses for the object associated with the
+ /// VModuleKey K.
+ void mapSectionAddress(VModuleKey K, const void *LocalAddress,
+ JITTargetAddress TargetAddr) {
+ assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+ LinkedObjects[K]->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ /// @brief Immediately emit and finalize the object represented by the given
+ /// VModuleKey.
+ /// @param K VModuleKey for object to emit/finalize.
+ Error emitAndFinalize(VModuleKey K) {
+ assert(LinkedObjects.count(K) && "VModuleKey not associated with object");
+ return LinkedObjects[K]->finalize();
+ }
+
+private:
+ ExecutionSession &ES;
+
+ std::map<VModuleKey, std::unique_ptr<LinkedObject>> LinkedObjects;
+ ResourcesGetter GetResources;
+ NotifyLoadedFtor NotifyLoaded;
+ NotifyFinalizedFtor NotifyFinalized;
+ bool ProcessAllSections = false;
+};
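+
+// Illustrative usage sketch (not part of the original header; ES, MemMgr,
+// Resolver and Buffer are hypothetical, and K is assumed to come from the
+// ExecutionSession). The layer is constructed with a callback supplying
+// per-object resources, objects are added under VModuleKeys, and symbols are
+// materialized lazily on lookup:
+//
+//   RTDyldObjectLinkingLayer ObjLayer(ES, [&](VModuleKey) {
+//     return RTDyldObjectLinkingLayer::Resources{MemMgr, Resolver};
+//   });
+//
+//   VModuleKey K = /* key obtained from ES */;
+//   if (auto Err = ObjLayer.addObject(K, std::move(Buffer)))
+//     return Err;
+//
+//   if (auto Sym = ObjLayer.findSymbolIn(K, "main", /*ExportedOnly=*/true))
+//     if (auto AddrOrErr = Sym.getAddress())
+//       ; // Call through *AddrOrErr.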
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
new file mode 100644
index 0000000..db810f4
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RawByteChannel.h
@@ -0,0 +1,185 @@
+//===- llvm/ExecutionEngine/Orc/RawByteChannel.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+#define LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/Orc/RPCSerialization.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <mutex>
+#include <string>
+#include <type_traits>
+
+namespace llvm {
+namespace orc {
+namespace rpc {
+
+/// Interface for byte-streams to be used with RPC.
+class RawByteChannel {
+public:
+ virtual ~RawByteChannel() = default;
+
+ /// Read Size bytes from the stream into *Dst.
+ virtual Error readBytes(char *Dst, unsigned Size) = 0;
+
+ /// Read size bytes from *Src and append them to the stream.
+ virtual Error appendBytes(const char *Src, unsigned Size) = 0;
+
+ /// Flush the stream if possible.
+ virtual Error send() = 0;
+
+ /// Notify the channel that we're starting a message send.
+ /// Locks the channel for writing.
+ template <typename FunctionIdT, typename SequenceIdT>
+ Error startSendMessage(const FunctionIdT &FnId, const SequenceIdT &SeqNo) {
+ writeLock.lock();
+ if (auto Err = serializeSeq(*this, FnId, SeqNo)) {
+ writeLock.unlock();
+ return Err;
+ }
+ return Error::success();
+ }
+
+ /// Notify the channel that we're ending a message send.
+ /// Unlocks the channel for writing.
+ Error endSendMessage() {
+ writeLock.unlock();
+ return Error::success();
+ }
+
+ /// Notify the channel that we're starting a message receive.
+ /// Locks the channel for reading.
+ template <typename FunctionIdT, typename SequenceNumberT>
+ Error startReceiveMessage(FunctionIdT &FnId, SequenceNumberT &SeqNo) {
+ readLock.lock();
+ if (auto Err = deserializeSeq(*this, FnId, SeqNo)) {
+ readLock.unlock();
+ return Err;
+ }
+ return Error::success();
+ }
+
+ /// Notify the channel that we're ending a message receive.
+ /// Unlocks the channel for reading.
+ Error endReceiveMessage() {
+ readLock.unlock();
+ return Error::success();
+ }
+
+ /// Get the lock for stream reading.
+ std::mutex &getReadLock() { return readLock; }
+
+ /// Get the lock for stream writing.
+ std::mutex &getWriteLock() { return writeLock; }
+
+private:
+ std::mutex readLock, writeLock;
+};
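+
+// Minimal sketch of a concrete channel (not part of the original header;
+// BufferChannel is hypothetical): an in-memory channel that appends to and
+// reads from a byte buffer. A real implementation would typically wrap a
+// socket or pipe and implement send() as a flush.
+//
+//   class BufferChannel : public RawByteChannel {
+//   public:
+//     Error readBytes(char *Dst, unsigned Size) override {
+//       if (ReadIdx + Size > Buffer.size())
+//         return make_error<StringError>("short read",
+//                                        inconvertibleErrorCode());
+//       std::copy_n(Buffer.data() + ReadIdx, Size, Dst);
+//       ReadIdx += Size;
+//       return Error::success();
+//     }
+//
+//     Error appendBytes(const char *Src, unsigned Size) override {
+//       Buffer.append(Src, Src + Size);
+//       return Error::success();
+//     }
+//
+//     Error send() override { return Error::success(); }
+//
+//   private:
+//     std::string Buffer;
+//     size_t ReadIdx = 0;
+//   };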
+
+template <typename ChannelT, typename T>
+class SerializationTraits<
+ ChannelT, T, T,
+ typename std::enable_if<
+ std::is_base_of<RawByteChannel, ChannelT>::value &&
+ (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
+ std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
+ std::is_same<T, uint32_t>::value || std::is_same<T, int32_t>::value ||
+ std::is_same<T, uint64_t>::value || std::is_same<T, int64_t>::value ||
+ std::is_same<T, char>::value)>::type> {
+public:
+ static Error serialize(ChannelT &C, T V) {
+ support::endian::byte_swap<T, support::big>(V);
+ return C.appendBytes(reinterpret_cast<const char *>(&V), sizeof(T));
+ };
+
+ static Error deserialize(ChannelT &C, T &V) {
+ if (auto Err = C.readBytes(reinterpret_cast<char *>(&V), sizeof(T)))
+ return Err;
+ support::endian::byte_swap<T, support::big>(V);
+ return Error::success();
+ };
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, bool, bool,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ static Error serialize(ChannelT &C, bool V) {
+ uint8_t Tmp = V ? 1 : 0;
+ if (auto Err =
+ C.appendBytes(reinterpret_cast<const char *>(&Tmp), 1))
+ return Err;
+ return Error::success();
+ }
+
+ static Error deserialize(ChannelT &C, bool &V) {
+ uint8_t Tmp = 0;
+ if (auto Err = C.readBytes(reinterpret_cast<char *>(&Tmp), 1))
+ return Err;
+ V = Tmp != 0;
+ return Error::success();
+ }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, StringRef,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ /// RPC channel serialization for std::strings.
+ static Error serialize(RawByteChannel &C, StringRef S) {
+ if (auto Err = serializeSeq(C, static_cast<uint64_t>(S.size())))
+ return Err;
+ return C.appendBytes((const char *)S.data(), S.size());
+ }
+};
+
+template <typename ChannelT, typename T>
+class SerializationTraits<ChannelT, std::string, T,
+ typename std::enable_if<
+ std::is_base_of<RawByteChannel, ChannelT>::value &&
+ (std::is_same<T, const char*>::value ||
+ std::is_same<T, char*>::value)>::type> {
+public:
+ static Error serialize(RawByteChannel &C, const char *S) {
+ return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+ S);
+ }
+};
+
+template <typename ChannelT>
+class SerializationTraits<ChannelT, std::string, std::string,
+ typename std::enable_if<std::is_base_of<
+ RawByteChannel, ChannelT>::value>::type> {
+public:
+ /// RPC channel serialization for std::strings.
+ static Error serialize(RawByteChannel &C, const std::string &S) {
+ return SerializationTraits<ChannelT, std::string, StringRef>::serialize(C,
+ S);
+ }
+
+ /// RPC channel deserialization for std::strings.
+ static Error deserialize(RawByteChannel &C, std::string &S) {
+ uint64_t Count = 0;
+ if (auto Err = deserializeSeq(C, Count))
+ return Err;
+ S.resize(Count);
+ return C.readBytes(&S[0], Count);
+ }
+};
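+
+// Illustrative sketch (not part of the original header; MyPoint is a
+// hypothetical user type): further types can be made serializable over any
+// RawByteChannel-derived channel by specializing SerializationTraits and
+// forwarding to serializeSeq / deserializeSeq for the members:
+//
+//   struct MyPoint { int32_t X, Y; };
+//
+//   template <typename ChannelT>
+//   class SerializationTraits<ChannelT, MyPoint, MyPoint,
+//                             typename std::enable_if<std::is_base_of<
+//                                 RawByteChannel, ChannelT>::value>::type> {
+//   public:
+//     static Error serialize(ChannelT &C, const MyPoint &P) {
+//       return serializeSeq(C, P.X, P.Y);
+//     }
+//
+//     static Error deserialize(ChannelT &C, MyPoint &P) {
+//       return deserializeSeq(C, P.X, P.Y);
+//     }
+//   };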
+
+} // end namespace rpc
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_RAWBYTECHANNEL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h
new file mode 100644
index 0000000..b95faaa
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/RemoteObjectLayer.h
@@ -0,0 +1,529 @@
+//===------ RemoteObjectLayer.h - Forwards objs to a remote -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Forwards objects to a remote object layer via RPC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
+
+#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetRPCAPI.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
+#include <map>
+
+namespace llvm {
+namespace orc {
+
+/// RPC API needed by RemoteObjectClientLayer and RemoteObjectServerLayer.
+class RemoteObjectLayerAPI {
+public:
+
+ using ObjHandleT = remote::ResourceIdMgr::ResourceId;
+
+protected:
+
+ using RemoteSymbolId = remote::ResourceIdMgr::ResourceId;
+ using RemoteSymbol = std::pair<RemoteSymbolId, JITSymbolFlags>;
+
+public:
+
+ using BadSymbolHandleError = remote::ResourceNotFound<RemoteSymbolId>;
+ using BadObjectHandleError = remote::ResourceNotFound<ObjHandleT>;
+
+protected:
+
+ static const ObjHandleT InvalidObjectHandleId = 0;
+ static const RemoteSymbolId NullSymbolId = 0;
+
+ class AddObject
+ : public rpc::Function<AddObject, Expected<ObjHandleT>(std::string)> {
+ public:
+ static const char *getName() { return "AddObject"; }
+ };
+
+ class RemoveObject
+ : public rpc::Function<RemoveObject, Error(ObjHandleT)> {
+ public:
+ static const char *getName() { return "RemoveObject"; }
+ };
+
+ class FindSymbol
+ : public rpc::Function<FindSymbol, Expected<RemoteSymbol>(std::string,
+ bool)> {
+ public:
+ static const char *getName() { return "FindSymbol"; }
+ };
+
+ class FindSymbolIn
+ : public rpc::Function<FindSymbolIn,
+ Expected<RemoteSymbol>(ObjHandleT, std::string,
+ bool)> {
+ public:
+ static const char *getName() { return "FindSymbolIn"; }
+ };
+
+ class EmitAndFinalize
+ : public rpc::Function<EmitAndFinalize,
+ Error(ObjHandleT)> {
+ public:
+ static const char *getName() { return "EmitAndFinalize"; }
+ };
+
+ class Lookup
+ : public rpc::Function<Lookup,
+ Expected<RemoteSymbol>(ObjHandleT, std::string)> {
+ public:
+ static const char *getName() { return "Lookup"; }
+ };
+
+ class LookupInLogicalDylib
+ : public rpc::Function<LookupInLogicalDylib,
+ Expected<RemoteSymbol>(ObjHandleT, std::string)> {
+ public:
+ static const char *getName() { return "LookupInLogicalDylib"; }
+ };
+
+ class ReleaseRemoteSymbol
+ : public rpc::Function<ReleaseRemoteSymbol, Error(RemoteSymbolId)> {
+ public:
+ static const char *getName() { return "ReleaseRemoteSymbol"; }
+ };
+
+ class MaterializeRemoteSymbol
+ : public rpc::Function<MaterializeRemoteSymbol,
+ Expected<JITTargetAddress>(RemoteSymbolId)> {
+ public:
+ static const char *getName() { return "MaterializeRemoteSymbol"; }
+ };
+};
+
+/// Base class containing common utilities for RemoteObjectClientLayer and
+/// RemoteObjectServerLayer.
+template <typename RPCEndpoint>
+class RemoteObjectLayer : public RemoteObjectLayerAPI {
+public:
+
+ RemoteObjectLayer(RPCEndpoint &Remote,
+ std::function<void(Error)> ReportError)
+ : Remote(Remote), ReportError(std::move(ReportError)),
+ SymbolIdMgr(NullSymbolId + 1) {
+ using ThisT = RemoteObjectLayer<RPCEndpoint>;
+ Remote.template addHandler<ReleaseRemoteSymbol>(
+ *this, &ThisT::handleReleaseRemoteSymbol);
+ Remote.template addHandler<MaterializeRemoteSymbol>(
+ *this, &ThisT::handleMaterializeRemoteSymbol);
+ }
+
+protected:
+
+ /// This class is used as the symbol materializer for JITSymbols returned by
+ /// RemoteObjectLayerClient/RemoteObjectLayerServer -- the materializer knows
+ /// how to call back to the other RPC endpoint to get the address when
+ /// requested.
+ class RemoteSymbolMaterializer {
+ public:
+
+ /// Construct a RemoteSymbolMaterializer for the given RemoteObjectLayer
+ /// with the given Id.
+ RemoteSymbolMaterializer(RemoteObjectLayer &C,
+ RemoteSymbolId Id)
+ : C(C), Id(Id) {}
+
+ RemoteSymbolMaterializer(const RemoteSymbolMaterializer &Other)
+ : C(Other.C), Id(Other.Id) {
+ // FIXME: This is a horrible, auto_ptr-style, copy-as-move operation.
+ // It should be removed as soon as LLVM has C++14's generalized
+ // lambda capture (at which point the materializer can be moved
+ // into the lambda in remoteToJITSymbol below).
+ const_cast<RemoteSymbolMaterializer&>(Other).Id = 0;
+ }
+
+ RemoteSymbolMaterializer&
+ operator=(const RemoteSymbolMaterializer&) = delete;
+
+ /// Release the remote symbol.
+ ~RemoteSymbolMaterializer() {
+ if (Id)
+ C.releaseRemoteSymbol(Id);
+ }
+
+ /// Materialize the symbol on the remote and get its address.
+ Expected<JITTargetAddress> materialize() {
+ auto Addr = C.materializeRemoteSymbol(Id);
+ Id = 0;
+ return Addr;
+ }
+
+ private:
+ RemoteObjectLayer &C;
+ RemoteSymbolId Id;
+ };
+
+ /// Convenience function for getting a null remote symbol value.
+ RemoteSymbol nullRemoteSymbol() {
+ return RemoteSymbol(0, JITSymbolFlags());
+ }
+
+ /// Creates a StringError that contains a copy of Err's log message, then
+ /// sends that StringError to ReportError.
+ ///
+ /// This allows us to locally log error messages for errors that will actually
+ /// be delivered to the remote.
+ Error teeLog(Error Err) {
+ return handleErrors(std::move(Err),
+ [this](std::unique_ptr<ErrorInfoBase> EIB) {
+ ReportError(make_error<StringError>(
+ EIB->message(),
+ EIB->convertToErrorCode()));
+ return Error(std::move(EIB));
+ });
+ }
+
+ Error badRemoteSymbolIdError(RemoteSymbolId Id) {
+ return make_error<BadSymbolHandleError>(Id, "Remote JIT Symbol");
+ }
+
+ Error badObjectHandleError(ObjHandleT H) {
+ return make_error<RemoteObjectLayerAPI::BadObjectHandleError>(
+ H, "Bad object handle");
+ }
+
+ /// Create a RemoteSymbol wrapping the given JITSymbol.
+ Expected<RemoteSymbol> jitSymbolToRemote(JITSymbol Sym) {
+ if (Sym) {
+ auto Id = SymbolIdMgr.getNext();
+ auto Flags = Sym.getFlags();
+ assert(!InUseSymbols.count(Id) && "Symbol id already in use");
+ InUseSymbols.insert(std::make_pair(Id, std::move(Sym)));
+ return RemoteSymbol(Id, Flags);
+ } else if (auto Err = Sym.takeError())
+ return teeLog(std::move(Err));
+ // else...
+ return nullRemoteSymbol();
+ }
+
+ /// Convert an Expected<RemoteSymbol> to a JITSymbol.
+ JITSymbol remoteToJITSymbol(Expected<RemoteSymbol> RemoteSymOrErr) {
+ if (RemoteSymOrErr) {
+ auto &RemoteSym = *RemoteSymOrErr;
+ if (RemoteSym == nullRemoteSymbol())
+ return nullptr;
+ // else...
+ RemoteSymbolMaterializer RSM(*this, RemoteSym.first);
+ auto Sym =
+ JITSymbol([RSM]() mutable { return RSM.materialize(); },
+ RemoteSym.second);
+ return Sym;
+ } else
+ return RemoteSymOrErr.takeError();
+ }
+
+ RPCEndpoint &Remote;
+ std::function<void(Error)> ReportError;
+
+private:
+
+ /// Notify the remote to release the given JITSymbol.
+ void releaseRemoteSymbol(RemoteSymbolId Id) {
+ if (auto Err = Remote.template callB<ReleaseRemoteSymbol>(Id))
+ ReportError(std::move(Err));
+ }
+
+ /// Notify the remote to materialize the JITSymbol with the given Id and
+ /// return its address.
+ Expected<JITTargetAddress> materializeRemoteSymbol(RemoteSymbolId Id) {
+ return Remote.template callB<MaterializeRemoteSymbol>(Id);
+ }
+
+ /// Release the JITSymbol with the given Id.
+ Error handleReleaseRemoteSymbol(RemoteSymbolId Id) {
+ auto SI = InUseSymbols.find(Id);
+ if (SI != InUseSymbols.end()) {
+ InUseSymbols.erase(SI);
+ return Error::success();
+ } else
+ return teeLog(badRemoteSymbolIdError(Id));
+ }
+
+ /// Run the materializer for the JITSymbol with the given Id and return its
+ /// address.
+ Expected<JITTargetAddress> handleMaterializeRemoteSymbol(RemoteSymbolId Id) {
+ auto SI = InUseSymbols.find(Id);
+ if (SI != InUseSymbols.end()) {
+ auto AddrOrErr = SI->second.getAddress();
+ InUseSymbols.erase(SI);
+ SymbolIdMgr.release(Id);
+ if (AddrOrErr)
+ return *AddrOrErr;
+ else
+ return teeLog(AddrOrErr.takeError());
+ } else {
+ return teeLog(badRemoteSymbolIdError(Id));
+ }
+ }
+
+ remote::ResourceIdMgr SymbolIdMgr;
+ std::map<RemoteSymbolId, JITSymbol> InUseSymbols;
+};
+
+/// RemoteObjectClientLayer forwards the ORC Object Layer API over an RPC
+/// connection.
+///
+/// This class can be used as the base layer of a JIT stack on the client and
+/// will forward operations to a corresponding RemoteObjectServerLayer on the
+/// server (which can be composed on top of a "real" object layer like
+/// RTDyldObjectLinkingLayer to actually carry out the operations).
+///
+/// Sending relocatable objects to the server (rather than fully relocated
+/// bits) allows JIT'd code to be cached on the server side and re-used in
+/// subsequent JIT sessions.
+template <typename RPCEndpoint>
+class RemoteObjectClientLayer : public RemoteObjectLayer<RPCEndpoint> {
+private:
+
+ using AddObject = RemoteObjectLayerAPI::AddObject;
+ using RemoveObject = RemoteObjectLayerAPI::RemoveObject;
+ using FindSymbol = RemoteObjectLayerAPI::FindSymbol;
+ using FindSymbolIn = RemoteObjectLayerAPI::FindSymbolIn;
+ using EmitAndFinalize = RemoteObjectLayerAPI::EmitAndFinalize;
+ using Lookup = RemoteObjectLayerAPI::Lookup;
+ using LookupInLogicalDylib = RemoteObjectLayerAPI::LookupInLogicalDylib;
+
+ using RemoteObjectLayer<RPCEndpoint>::teeLog;
+ using RemoteObjectLayer<RPCEndpoint>::badObjectHandleError;
+ using RemoteObjectLayer<RPCEndpoint>::remoteToJITSymbol;
+
+public:
+
+ using ObjHandleT = RemoteObjectLayerAPI::ObjHandleT;
+ using RemoteSymbol = RemoteObjectLayerAPI::RemoteSymbol;
+
+ using ObjectPtr = std::unique_ptr<MemoryBuffer>;
+
+ /// Create a RemoteObjectClientLayer that communicates with a
+ /// RemoteObjectServerLayer instance via the given RPCEndpoint.
+ ///
+  /// The ReportError functor can be used to locally log errors that are
+  /// intended to be sent to the remote.
+ RemoteObjectClientLayer(RPCEndpoint &Remote,
+ std::function<void(Error)> ReportError)
+ : RemoteObjectLayer<RPCEndpoint>(Remote, std::move(ReportError)) {
+ using ThisT = RemoteObjectClientLayer<RPCEndpoint>;
+ Remote.template addHandler<Lookup>(*this, &ThisT::lookup);
+ Remote.template addHandler<LookupInLogicalDylib>(
+ *this, &ThisT::lookupInLogicalDylib);
+ }
+
+ /// @brief Add an object to the JIT.
+ ///
+ /// @return A handle that can be used to refer to the loaded object (for
+ /// symbol searching, finalization, freeing memory, etc.).
+ Expected<ObjHandleT>
+ addObject(ObjectPtr ObjBuffer,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver) {
+ if (auto HandleOrErr =
+ this->Remote.template callB<AddObject>(ObjBuffer->getBuffer())) {
+ auto &Handle = *HandleOrErr;
+ // FIXME: Return an error for this:
+ assert(!Resolvers.count(Handle) && "Handle already in use?");
+ Resolvers[Handle] = std::move(Resolver);
+ return Handle;
+ } else
+ return HandleOrErr.takeError();
+ }
+
+ /// @brief Remove the given object from the JIT.
+ Error removeObject(ObjHandleT H) {
+ return this->Remote.template callB<RemoveObject>(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ return remoteToJITSymbol(
+ this->Remote.template callB<FindSymbol>(Name,
+ ExportedSymbolsOnly));
+ }
+
+ /// @brief Search for the given named symbol within the given context.
+ JITSymbol findSymbolIn(ObjHandleT H, StringRef Name, bool ExportedSymbolsOnly) {
+ return remoteToJITSymbol(
+ this->Remote.template callB<FindSymbolIn>(H, Name,
+ ExportedSymbolsOnly));
+ }
+
+ /// @brief Immediately emit and finalize the object with the given handle.
+ Error emitAndFinalize(ObjHandleT H) {
+ return this->Remote.template callB<EmitAndFinalize>(H);
+ }
+
+private:
+
+ Expected<RemoteSymbol> lookup(ObjHandleT H, const std::string &Name) {
+ auto RI = Resolvers.find(H);
+ if (RI != Resolvers.end()) {
+ return this->jitSymbolToRemote(RI->second->findSymbol(Name));
+ } else
+ return teeLog(badObjectHandleError(H));
+ }
+
+ Expected<RemoteSymbol> lookupInLogicalDylib(ObjHandleT H,
+ const std::string &Name) {
+ auto RI = Resolvers.find(H);
+ if (RI != Resolvers.end())
+ return this->jitSymbolToRemote(
+ RI->second->findSymbolInLogicalDylib(Name));
+ else
+ return teeLog(badObjectHandleError(H));
+ }
+
+ std::map<remote::ResourceIdMgr::ResourceId,
+ std::shared_ptr<LegacyJITSymbolResolver>>
+ Resolvers;
+};
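+
+// Illustrative client-side wiring sketch (not part of the original header;
+// MyChannel, Channel, ReportError, Buffer and Resolver are hypothetical; the
+// endpoint type is one of the rpc endpoints defined earlier in this patch):
+//
+//   using RPCEndpointT = rpc::SingleThreadedRPCEndpoint<MyChannel>;
+//
+//   RPCEndpointT Endpoint(Channel, /*LazyAutoNegotiation=*/true);
+//   RemoteObjectClientLayer<RPCEndpointT> Client(Endpoint, ReportError);
+//
+//   if (auto HandleOrErr = Client.addObject(std::move(Buffer), Resolver)) {
+//     if (auto Sym = Client.findSymbolIn(*HandleOrErr, "main", true))
+//       ; // Materializing Sym round-trips to the server for the address.
+//   } else
+//     return HandleOrErr.takeError();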
+
+/// RemoteObjectServerLayer acts as a server, handling RPC calls for the
+/// object layer API over the given RPC connection.
+///
+/// This class can be composed on top of a 'real' object layer (e.g.
+/// RTDyldObjectLinkingLayer) to do the actual work of relocating objects
+/// and making them executable.
+template <typename BaseLayerT, typename RPCEndpoint>
+class RemoteObjectServerLayer : public RemoteObjectLayer<RPCEndpoint> {
+private:
+
+ using ObjHandleT = RemoteObjectLayerAPI::ObjHandleT;
+ using RemoteSymbol = RemoteObjectLayerAPI::RemoteSymbol;
+
+ using AddObject = RemoteObjectLayerAPI::AddObject;
+ using RemoveObject = RemoteObjectLayerAPI::RemoveObject;
+ using FindSymbol = RemoteObjectLayerAPI::FindSymbol;
+ using FindSymbolIn = RemoteObjectLayerAPI::FindSymbolIn;
+ using EmitAndFinalize = RemoteObjectLayerAPI::EmitAndFinalize;
+ using Lookup = RemoteObjectLayerAPI::Lookup;
+ using LookupInLogicalDylib = RemoteObjectLayerAPI::LookupInLogicalDylib;
+
+ using RemoteObjectLayer<RPCEndpoint>::teeLog;
+ using RemoteObjectLayer<RPCEndpoint>::badObjectHandleError;
+ using RemoteObjectLayer<RPCEndpoint>::remoteToJITSymbol;
+
+public:
+
+ /// Create a RemoteObjectServerLayer with the given base layer (which must be
+ /// an object layer), RPC endpoint, and error reporter function.
+ RemoteObjectServerLayer(BaseLayerT &BaseLayer,
+ RPCEndpoint &Remote,
+ std::function<void(Error)> ReportError)
+ : RemoteObjectLayer<RPCEndpoint>(Remote, std::move(ReportError)),
+ BaseLayer(BaseLayer), HandleIdMgr(1) {
+ using ThisT = RemoteObjectServerLayer<BaseLayerT, RPCEndpoint>;
+
+ Remote.template addHandler<AddObject>(*this, &ThisT::addObject);
+ Remote.template addHandler<RemoveObject>(*this, &ThisT::removeObject);
+ Remote.template addHandler<FindSymbol>(*this, &ThisT::findSymbol);
+ Remote.template addHandler<FindSymbolIn>(*this, &ThisT::findSymbolIn);
+ Remote.template addHandler<EmitAndFinalize>(*this, &ThisT::emitAndFinalize);
+ }
+
+private:
+
+ class StringMemoryBuffer : public MemoryBuffer {
+ public:
+ StringMemoryBuffer(std::string Buffer)
+ : Buffer(std::move(Buffer)) {
+ init(this->Buffer.data(), this->Buffer.data() + this->Buffer.size(),
+ false);
+ }
+
+ BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+ private:
+ std::string Buffer;
+ };
+
+ JITSymbol lookup(ObjHandleT Id, const std::string &Name) {
+ return remoteToJITSymbol(
+ this->Remote.template callB<Lookup>(Id, Name));
+ }
+
+ JITSymbol lookupInLogicalDylib(ObjHandleT Id, const std::string &Name) {
+ return remoteToJITSymbol(
+ this->Remote.template callB<LookupInLogicalDylib>(Id, Name));
+ }
+
+ Expected<ObjHandleT> addObject(std::string ObjBuffer) {
+ auto Buffer = llvm::make_unique<StringMemoryBuffer>(std::move(ObjBuffer));
+ auto Id = HandleIdMgr.getNext();
+ assert(!BaseLayerHandles.count(Id) && "Id already in use?");
+
+ auto Resolver = createLambdaResolver(
+ [this, Id](const std::string &Name) { return lookup(Id, Name); },
+ [this, Id](const std::string &Name) {
+ return lookupInLogicalDylib(Id, Name);
+ });
+
+ if (auto HandleOrErr =
+ BaseLayer.addObject(std::move(Buffer), std::move(Resolver))) {
+ BaseLayerHandles[Id] = std::move(*HandleOrErr);
+ return Id;
+ } else
+ return teeLog(HandleOrErr.takeError());
+ }
+
+ Error removeObject(ObjHandleT H) {
+ auto HI = BaseLayerHandles.find(H);
+ if (HI != BaseLayerHandles.end()) {
+ if (auto Err = BaseLayer.removeObject(HI->second))
+ return teeLog(std::move(Err));
+ return Error::success();
+ } else
+ return teeLog(badObjectHandleError(H));
+ }
+
+ Expected<RemoteSymbol> findSymbol(const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+ return this->jitSymbolToRemote(std::move(Sym));
+ else if (auto Err = Sym.takeError())
+ return teeLog(std::move(Err));
+ return this->nullRemoteSymbol();
+ }
+
+ Expected<RemoteSymbol> findSymbolIn(ObjHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ auto HI = BaseLayerHandles.find(H);
+ if (HI != BaseLayerHandles.end()) {
+ if (auto Sym = BaseLayer.findSymbolIn(HI->second, Name, ExportedSymbolsOnly))
+ return this->jitSymbolToRemote(std::move(Sym));
+ else if (auto Err = Sym.takeError())
+ return teeLog(std::move(Err));
+ return this->nullRemoteSymbol();
+ } else
+ return teeLog(badObjectHandleError(H));
+ }
+
+ Error emitAndFinalize(ObjHandleT H) {
+ auto HI = BaseLayerHandles.find(H);
+ if (HI != BaseLayerHandles.end()) {
+ if (auto Err = BaseLayer.emitAndFinalize(HI->second))
+ return teeLog(std::move(Err));
+ return Error::success();
+ } else
+ return teeLog(badObjectHandleError(H));
+ }
+
+ BaseLayerT &BaseLayer;
+ remote::ResourceIdMgr HandleIdMgr;
+ std::map<ObjHandleT, typename BaseLayerT::ObjHandleT> BaseLayerHandles;
+};
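+
+// Illustrative server-side wiring sketch (not part of the original header;
+// MyBaseLayer, BaseLayer, Channel, ReportError and Done are hypothetical).
+// The server layer is stacked on a real object layer and the endpoint is then
+// driven until the connection ends:
+//
+//   RPCEndpointT Endpoint(Channel, /*LazyAutoNegotiation=*/true);
+//   RemoteObjectServerLayer<MyBaseLayer, RPCEndpointT>
+//       Server(BaseLayer, Endpoint, ReportError);
+//
+//   // Service AddObject / FindSymbol / EmitAndFinalize calls from the client.
+//   while (!Done)
+//     if (auto Err = Endpoint.handleOne())
+//       return Err;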
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_REMOTEOBJECTLAYER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
new file mode 100644
index 0000000..da40d1c
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/Orc/SymbolStringPool.h
@@ -0,0 +1,137 @@
+//===- SymbolStringPool.h - Multi-threaded pool for JIT symbols -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains a multi-threaded string pool suitable for use with ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
+#define LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
+
+#include "llvm/ADT/StringMap.h"
+#include <atomic>
+#include <mutex>
+
+namespace llvm {
+namespace orc {
+
+class SymbolStringPtr;
+
+/// @brief String pool for symbol names used by the JIT.
+class SymbolStringPool {
+ friend class SymbolStringPtr;
+public:
+ /// @brief Create a symbol string pointer from the given string.
+ SymbolStringPtr intern(StringRef S);
+
+ /// @brief Remove from the pool any entries that are no longer referenced.
+ void clearDeadEntries();
+
+ /// @brief Returns true if the pool is empty.
+ bool empty() const;
+private:
+ using RefCountType = std::atomic<size_t>;
+ using PoolMap = StringMap<RefCountType>;
+ using PoolMapEntry = StringMapEntry<RefCountType>;
+ mutable std::mutex PoolMutex;
+ PoolMap Pool;
+};
+
+/// @brief Pointer to a pooled string representing a symbol name.
+class SymbolStringPtr {
+ friend class SymbolStringPool;
+ friend bool operator==(const SymbolStringPtr &LHS,
+ const SymbolStringPtr &RHS);
+ friend bool operator<(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS);
+
+public:
+ SymbolStringPtr() = default;
+ SymbolStringPtr(const SymbolStringPtr &Other)
+ : S(Other.S) {
+ if (S)
+ ++S->getValue();
+ }
+
+ SymbolStringPtr& operator=(const SymbolStringPtr &Other) {
+ if (S)
+ --S->getValue();
+ S = Other.S;
+ if (S)
+ ++S->getValue();
+ return *this;
+ }
+
+ SymbolStringPtr(SymbolStringPtr &&Other) : S(nullptr) {
+ std::swap(S, Other.S);
+ }
+
+ SymbolStringPtr& operator=(SymbolStringPtr &&Other) {
+ if (S)
+ --S->getValue();
+ S = nullptr;
+ std::swap(S, Other.S);
+ return *this;
+ }
+
+ ~SymbolStringPtr() {
+ if (S)
+ --S->getValue();
+ }
+
+ StringRef operator*() const { return S->first(); }
+
+private:
+
+ SymbolStringPtr(SymbolStringPool::PoolMapEntry *S)
+ : S(S) {
+ if (S)
+ ++S->getValue();
+ }
+
+ SymbolStringPool::PoolMapEntry *S = nullptr;
+};
+
+inline bool operator==(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+ return LHS.S == RHS.S;
+}
+
+inline bool operator!=(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+ return !(LHS == RHS);
+}
+
+inline bool operator<(const SymbolStringPtr &LHS, const SymbolStringPtr &RHS) {
+ return LHS.S < RHS.S;
+}
+
+inline SymbolStringPtr SymbolStringPool::intern(StringRef S) {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ PoolMap::iterator I;
+ bool Added;
+ std::tie(I, Added) = Pool.try_emplace(S, 0);
+ return SymbolStringPtr(&*I);
+}
+
+inline void SymbolStringPool::clearDeadEntries() {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ for (auto I = Pool.begin(), E = Pool.end(); I != E;) {
+ auto Tmp = I++;
+ if (Tmp->second == 0)
+ Pool.erase(Tmp);
+ }
+}
+
+inline bool SymbolStringPool::empty() const {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ return Pool.empty();
+}
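+
+// Illustrative usage sketch (not part of the original header): interned
+// strings share a single pool entry, compare by pointer, and keep their entry
+// alive until the last SymbolStringPtr referencing it is destroyed.
+//
+//   SymbolStringPool Pool;
+//   {
+//     SymbolStringPtr Foo = Pool.intern("foo");
+//     SymbolStringPtr Foo2 = Pool.intern("foo");
+//     assert(Foo == Foo2 && "Equal strings intern to the same entry");
+//     assert(*Foo == "foo" && "Dereferencing yields the pooled string");
+//   }
+//   Pool.clearDeadEntries();
+//   assert(Pool.empty() && "No live references remain");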
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h b/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
new file mode 100644
index 0000000..4cd5648
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
@@ -0,0 +1,38 @@
+//===---- OrcMCJITReplacement.h - Orc-based MCJIT replacement ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces OrcMCJITReplacement to link in on certain operating
+// systems (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+#define LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInOrcMCJITReplacement();
+
+namespace {
+ struct ForceOrcMCJITReplacementLinking {
+ ForceOrcMCJITReplacementLinking() {
+ // We must reference OrcMCJITReplacement in such a way that compilers will
+ // not delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough to know
+ // that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ LLVMLinkInOrcMCJITReplacement();
+ }
+ } ForceOrcMCJITReplacementLinking;
+}
+
+#endif
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h b/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
new file mode 100644
index 0000000..ee75202
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
@@ -0,0 +1,153 @@
+//===-- RTDyldMemoryManager.h - Memory manager for MC-JIT -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+namespace llvm {
+
+class ExecutionEngine;
+
+namespace object {
+ class ObjectFile;
+} // end namespace object
+
+class MCJITMemoryManager : public RuntimeDyld::MemoryManager {
+public:
+ // Don't hide the notifyObjectLoaded method from RuntimeDyld::MemoryManager.
+ using RuntimeDyld::MemoryManager::notifyObjectLoaded;
+
+ /// This method is called after an object has been loaded into memory but
+ /// before relocations are applied to the loaded sections. The object load
+ /// may have been initiated by MCJIT to resolve an external symbol for another
+ /// object that is being finalized. In that case, the object about which
+ /// the memory manager is being notified will be finalized immediately after
+ /// the memory manager returns from this call.
+ ///
+ /// Memory managers which are preparing code for execution in an external
+ /// address space can use this call to remap the section addresses for the
+ /// newly loaded object.
+ virtual void notifyObjectLoaded(ExecutionEngine *EE,
+ const object::ObjectFile &) {}
+};
+
+// RuntimeDyld clients often want to handle the memory management of
+// what gets placed where. For JIT clients, this is the subset of
+// JITMemoryManager required for dynamic loading of binaries.
+//
+// FIXME: As the RuntimeDyld fills out, additional routines will be needed
+// for the varying types of objects to be allocated.
+class RTDyldMemoryManager : public MCJITMemoryManager,
+ public LegacyJITSymbolResolver {
+public:
+ RTDyldMemoryManager() = default;
+ RTDyldMemoryManager(const RTDyldMemoryManager&) = delete;
+ void operator=(const RTDyldMemoryManager&) = delete;
+ ~RTDyldMemoryManager() override;
+
+ /// Register EH frames in the current process.
+ static void registerEHFramesInProcess(uint8_t *Addr, size_t Size);
+
+  /// Deregister EH frames in the current process.
+ static void deregisterEHFramesInProcess(uint8_t *Addr, size_t Size);
+
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;
+ void deregisterEHFrames() override;
+
+ /// This method returns the address of the specified function or variable in
+ /// the current process.
+ static uint64_t getSymbolAddressInProcess(const std::string &Name);
+
+ /// Legacy symbol lookup - DEPRECATED! Please override findSymbol instead.
+ ///
+ /// This method returns the address of the specified function or variable.
+ /// It is used to resolve symbols during module linking.
+ virtual uint64_t getSymbolAddress(const std::string &Name) {
+ return getSymbolAddressInProcess(Name);
+ }
+
+  /// This method returns a JITSymbol for the specified function or variable.
+  /// It is used to resolve symbols during module linking.
+ ///
+ /// By default this falls back on the legacy lookup method:
+ /// 'getSymbolAddress'. The address returned by getSymbolAddress is treated as
+ /// a strong, exported symbol, consistent with historical treatment by
+ /// RuntimeDyld.
+ ///
+ /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+  /// this method and return a JITSymbol with the flags set correctly. This is
+ /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+ JITSymbol findSymbol(const std::string &Name) override {
+ return JITSymbol(getSymbolAddress(Name), JITSymbolFlags::Exported);
+ }
+
+ /// Legacy symbol lookup -- DEPRECATED! Please override
+ /// findSymbolInLogicalDylib instead.
+ ///
+ /// Default to treating all modules as separate.
+ virtual uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) {
+ return 0;
+ }
+
+ /// Default to treating all modules as separate.
+ ///
+ /// By default this falls back on the legacy lookup method:
+ /// 'getSymbolAddressInLogicalDylib'. The address returned by
+ /// getSymbolAddressInLogicalDylib is treated as a strong, exported symbol,
+ /// consistent with historical treatment by RuntimeDyld.
+ ///
+ /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+  /// this method and return a JITSymbol with the flags set correctly. This is
+ /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+ JITSymbol
+ findSymbolInLogicalDylib(const std::string &Name) override {
+ return JITSymbol(getSymbolAddressInLogicalDylib(Name),
+ JITSymbolFlags::Exported);
+ }
+
+ /// This method returns the address of the specified function. As such it is
+ /// only useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If \p AbortOnFailure is false and no function with the given name is
+ /// found, this function returns a null pointer. Otherwise, it prints a
+ /// message to stderr and aborts.
+ ///
+ /// This function is deprecated for memory managers to be used with
+ /// MCJIT or RuntimeDyld. Use getSymbolAddress instead.
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
+protected:
+ struct EHFrame {
+ uint8_t *Addr;
+ size_t Size;
+ };
+ typedef std::vector<EHFrame> EHFrameInfos;
+ EHFrameInfos EHFrames;
+};
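+
+// Illustrative sketch (not part of the original header): a custom memory
+// manager can override findSymbol to report accurate symbol flags rather than
+// relying on the legacy getSymbolAddress path. SectionMemoryManager (from
+// llvm/ExecutionEngine/SectionMemoryManager.h) is used as a convenient
+// concrete base; MyResolvingMemoryManager and lookupInMyRuntime are
+// hypothetical.
+//
+//   class MyResolvingMemoryManager : public SectionMemoryManager {
+//   public:
+//     JITSymbol findSymbol(const std::string &Name) override {
+//       if (uint64_t Addr = lookupInMyRuntime(Name))
+//         return JITSymbol(Addr, JITSymbolFlags::Exported);
+//       // Fall back to a lookup in the host process.
+//       return JITSymbol(getSymbolAddressInProcess(Name),
+//                        JITSymbolFlags::Exported);
+//     }
+//   };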
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(
+ RTDyldMemoryManager, LLVMMCJITMemoryManagerRef)
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h
new file mode 100644
index 0000000..14da5af
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -0,0 +1,264 @@
+//===- RuntimeDyld.h - Run-time dynamic linker for MC-JIT -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the runtime dynamic linker facilities of the MC-JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+namespace object {
+
+template <typename T> class OwningBinary;
+
+} // end namespace object
+
+/// Base class for errors originating in RuntimeDyld, e.g. missing relocation
+/// support.
+class RuntimeDyldError : public ErrorInfo<RuntimeDyldError> {
+public:
+ static char ID;
+
+ RuntimeDyldError(std::string ErrMsg) : ErrMsg(std::move(ErrMsg)) {}
+
+ void log(raw_ostream &OS) const override;
+ const std::string &getErrorMessage() const { return ErrMsg; }
+ std::error_code convertToErrorCode() const override;
+
+private:
+ std::string ErrMsg;
+};
+
+class RuntimeDyldCheckerImpl;
+class RuntimeDyldImpl;
+
+class RuntimeDyld {
+ friend class RuntimeDyldCheckerImpl;
+
+protected:
+ // Change the address associated with a section when resolving relocations.
+ // Any relocations already associated with the symbol will be re-resolved.
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+public:
+ /// \brief Information about the loaded object.
+ class LoadedObjectInfo : public llvm::LoadedObjectInfo {
+ friend class RuntimeDyldImpl;
+
+ public:
+ using ObjSectionToIDMap = std::map<object::SectionRef, unsigned>;
+
+ LoadedObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+ : RTDyld(RTDyld), ObjSecToIDMap(std::move(ObjSecToIDMap)) {}
+
+ virtual object::OwningBinary<object::ObjectFile>
+ getObjectForDebug(const object::ObjectFile &Obj) const = 0;
+
+ uint64_t
+ getSectionLoadAddress(const object::SectionRef &Sec) const override;
+
+ protected:
+ virtual void anchor();
+
+ RuntimeDyldImpl &RTDyld;
+ ObjSectionToIDMap ObjSecToIDMap;
+ };
+
+ /// \brief Memory Management.
+ class MemoryManager {
+ friend class RuntimeDyld;
+
+ public:
+ MemoryManager() = default;
+ virtual ~MemoryManager() = default;
+
+ /// Allocate a memory block of (at least) the given size suitable for
+ /// executable code. The SectionID is a unique identifier assigned by the
+ /// RuntimeDyld instance, and optionally recorded by the memory manager to
+ /// access a loaded section.
+ virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) = 0;
+
+ /// Allocate a memory block of (at least) the given size suitable for data.
+ /// The SectionID is a unique identifier assigned by the JIT engine, and
+ /// optionally recorded by the memory manager to access a loaded section.
+ virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName,
+ bool IsReadOnly) = 0;
+
+ /// Inform the memory manager about the total amount of memory required to
+ /// allocate all sections to be loaded:
+ /// \p CodeSize - the total size of all code sections
+ /// \p DataSizeRO - the total size of all read-only data sections
+ /// \p DataSizeRW - the total size of all read-write data sections
+ ///
+    /// Note that by default the callback is disabled. To enable it, override
+    /// needsToReserveAllocationSpace to return true.
+ virtual void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+ uintptr_t RODataSize,
+ uint32_t RODataAlign,
+ uintptr_t RWDataSize,
+ uint32_t RWDataAlign) {}
+
+ /// Override to return true to enable the reserveAllocationSpace callback.
+ virtual bool needsToReserveAllocationSpace() { return false; }
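+
+    // Illustrative sketch (not part of the upstream interface): a memory
+    // manager that wants to pre-reserve a single slab would typically
+    // override both hooks together. The body below is only a placeholder.
+    //
+    //   bool needsToReserveAllocationSpace() override { return true; }
+    //   void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+    //                               uintptr_t RODataSize, uint32_t RODataAlign,
+    //                               uintptr_t RWDataSize,
+    //                               uint32_t RWDataAlign) override {
+    //     // e.g. reserve CodeSize + RODataSize + RWDataSize bytes up front.
+    //   }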
+
+ /// Register the EH frames with the runtime so that c++ exceptions work.
+ ///
+ /// \p Addr parameter provides the local address of the EH frame section
+ /// data, while \p LoadAddr provides the address of the data in the target
+ /// address space. If the section has not been remapped (which will usually
+ /// be the case for local execution) these two values will be the same.
+ virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) = 0;
+ virtual void deregisterEHFrames() = 0;
+
+ /// This method is called when object loading is complete and section page
+ /// permissions can be applied. It is up to the memory manager implementation
+ /// to decide whether or not to act on this method. The memory manager will
+ /// typically allocate all sections as read-write and then apply specific
+ /// permissions when this method is called. Code sections cannot be executed
+ /// until this function has been called. In addition, any cache coherency
+ /// operations needed to reliably use the memory are also performed.
+ ///
+ /// Returns true if an error occurred, false otherwise.
+ virtual bool finalizeMemory(std::string *ErrMsg = nullptr) = 0;
+
+ /// This method is called after an object has been loaded into memory but
+ /// before relocations are applied to the loaded sections.
+ ///
+ /// Memory managers which are preparing code for execution in an external
+ /// address space can use this call to remap the section addresses for the
+ /// newly loaded object.
+ ///
+ /// For clients that do not need access to an ExecutionEngine instance this
+ /// method should be preferred to its cousin
+ /// MCJITMemoryManager::notifyObjectLoaded as this method is compatible with
+ /// ORC JIT stacks.
+ virtual void notifyObjectLoaded(RuntimeDyld &RTDyld,
+ const object::ObjectFile &Obj) {}
+
+ private:
+ virtual void anchor();
+
+ bool FinalizationLocked = false;
+ };
+
+ /// \brief Construct a RuntimeDyld instance.
+ RuntimeDyld(MemoryManager &MemMgr, JITSymbolResolver &Resolver);
+ RuntimeDyld(const RuntimeDyld &) = delete;
+ RuntimeDyld &operator=(const RuntimeDyld &) = delete;
+ ~RuntimeDyld();
+
+ /// Add the referenced object file to the list of objects to be loaded and
+ /// relocated.
+ std::unique_ptr<LoadedObjectInfo> loadObject(const object::ObjectFile &O);
+
+ /// Get the address of our local copy of the symbol. This may or may not
+ /// be the address used for relocation (clients can copy the data around
+  /// and resolve relocations based on where they put it).
+ void *getSymbolLocalAddress(StringRef Name) const;
+
+ /// Get the target address and flags for the named symbol.
+ /// This address is the one used for relocation.
+ JITEvaluatedSymbol getSymbol(StringRef Name) const;
+
+  /// Returns a copy of the symbol table. This can be used by on-finalize
+ /// callbacks to extract the symbol table before throwing away the
+ /// RuntimeDyld instance. Because the map keys (StringRefs) are backed by
+ /// strings inside the RuntimeDyld instance, the map should be processed
+ /// before the RuntimeDyld instance is discarded.
+ std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const;
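+
+  // Illustrative sketch (not part of the upstream interface): because the
+  // returned StringRef keys point into this RuntimeDyld instance, copy them
+  // into owned strings before discarding the instance. `Dyld` stands for a
+  // live RuntimeDyld object.
+  //
+  //   std::map<std::string, JITEvaluatedSymbol> Owned;
+  //   for (const auto &KV : Dyld.getSymbolTable())
+  //     Owned.emplace(KV.first.str(), KV.second);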
+
+ /// Resolve the relocations for all symbols we currently know about.
+ void resolveRelocations();
+
+ /// Map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+ /// Register any EH frame sections that have been loaded but not previously
+  /// registered with the memory manager. Note that RuntimeDyld is responsible
+ /// for identifying the EH frame and calling the memory manager with the
+ /// EH frame section data. However, the memory manager itself will handle
+ /// the actual target-specific EH frame registration.
+ void registerEHFrames();
+
+ void deregisterEHFrames();
+
+ bool hasError();
+ StringRef getErrorString();
+
+ /// By default, only sections that are "required for execution" are passed to
+ /// the RTDyldMemoryManager, and other sections are discarded. Passing 'true'
+ /// to this method will cause RuntimeDyld to pass all sections to its
+  /// memory manager regardless of whether they are "required for execution"
+  /// in the usual sense. This is useful for inspecting metadata sections that
+  /// may not contain relocations, e.g. debug info or stackmaps.
+ ///
+ /// Must be called before the first object file is loaded.
+ void setProcessAllSections(bool ProcessAllSections) {
+ assert(!Dyld && "setProcessAllSections must be called before loadObject.");
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ /// Perform all actions needed to make the code owned by this RuntimeDyld
+ /// instance executable:
+ ///
+ /// 1) Apply relocations.
+ /// 2) Register EH frames.
+ /// 3) Update memory permissions*.
+ ///
+ /// * Finalization is potentially recursive**, and the 3rd step will only be
+ /// applied by the outermost call to finalize. This allows different
+ /// RuntimeDyld instances to share a memory manager without the innermost
+ /// finalization locking the memory and causing relocation fixup errors in
+ /// outer instances.
+ ///
+  /// ** Recursive finalization occurs when one RuntimeDyld instance needs the
+ /// address of a symbol owned by some other instance in order to apply
+ /// relocations.
+ ///
+ void finalizeWithMemoryManagerLocking();
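+
+  // Illustrative end-to-end sketch (not part of the upstream interface):
+  // `MyMemoryManager` and `MyResolver` stand for user-provided implementations
+  // of RuntimeDyld::MemoryManager and JITSymbolResolver, and `Obj` for an
+  // object::ObjectFile that has already been created.
+  //
+  //   MyMemoryManager MemMgr;
+  //   MyResolver Resolver;
+  //   RuntimeDyld Dyld(MemMgr, Resolver);
+  //   auto Info = Dyld.loadObject(Obj);
+  //   Dyld.finalizeWithMemoryManagerLocking(); // steps 1-3 above
+  //   if (Dyld.hasError())
+  //     report_fatal_error(Dyld.getErrorString());
+  //   JITEvaluatedSymbol Main = Dyld.getSymbol("main");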
+
+private:
+ // RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
+ // interface.
+ std::unique_ptr<RuntimeDyldImpl> Dyld;
+ MemoryManager &MemMgr;
+ JITSymbolResolver &Resolver;
+ bool ProcessAllSections;
+ RuntimeDyldCheckerImpl *Checker;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
new file mode 100644
index 0000000..de89f40
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -0,0 +1,112 @@
+//===---- RuntimeDyldChecker.h - RuntimeDyld tester framework -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+
+#include "llvm/ADT/Optional.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class StringRef;
+class MCDisassembler;
+class MemoryBuffer;
+class MCInstPrinter;
+class RuntimeDyld;
+class RuntimeDyldCheckerImpl;
+class raw_ostream;
+
+/// \brief RuntimeDyld invariant checker for verifying that RuntimeDyld has
+/// correctly applied relocations.
+///
+/// The RuntimeDyldChecker class evaluates expressions against an attached
+/// RuntimeDyld instance to verify that relocations have been applied
+/// correctly.
+///
+/// The expression language supports basic pointer arithmetic and bit-masking,
+/// and has limited disassembler integration for accessing instruction
+/// operands and the next PC (program counter) address for each instruction.
+///
+/// The language syntax is:
+///
+/// check = expr '=' expr
+///
+/// expr = binary_expr
+/// | sliceable_expr
+///
+/// sliceable_expr = '*{' number '}' load_addr_expr [slice]
+/// | '(' expr ')' [slice]
+/// | ident_expr [slice]
+/// | number [slice]
+///
+/// slice = '[' high-bit-index ':' low-bit-index ']'
+///
+/// load_addr_expr = symbol
+/// | '(' symbol '+' number ')'
+/// | '(' symbol '-' number ')'
+///
+/// ident_expr = 'decode_operand' '(' symbol ',' operand-index ')'
+/// | 'next_pc' '(' symbol ')'
+/// | 'stub_addr' '(' file-name ',' section-name ',' symbol ')'
+/// | symbol
+///
+/// binary_expr = expr '+' expr
+/// | expr '-' expr
+/// | expr '&' expr
+/// | expr '|' expr
+/// | expr '<<' expr
+/// | expr '>>' expr
+///
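+/// Example rule (an illustrative sketch; `foo` is a placeholder symbol name):
+/// check that the 32-bit word stored at the address of `foo` equals the
+/// next-pc value of `foo` minus 4:
+///
+///   *{4}foo = next_pc(foo) - 4
+///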
+class RuntimeDyldChecker {
+public:
+ RuntimeDyldChecker(RuntimeDyld &RTDyld, MCDisassembler *Disassembler,
+ MCInstPrinter *InstPrinter, raw_ostream &ErrStream);
+ ~RuntimeDyldChecker();
+
+  /// \brief Get the associated RTDyld instance.
+ RuntimeDyld& getRTDyld();
+
+  /// \brief Get the associated RTDyld instance.
+ const RuntimeDyld& getRTDyld() const;
+
+ /// \brief Check a single expression against the attached RuntimeDyld
+ /// instance.
+ bool check(StringRef CheckExpr) const;
+
+ /// \brief Scan the given memory buffer for lines beginning with the string
+ /// in RulePrefix. The remainder of the line is passed to the check
+ /// method to be evaluated as an expression.
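+  ///
+  /// For example (an illustrative sketch), with a RulePrefix of
+  /// "rtdyld-check:" a buffer line such as
+  ///
+  ///   rtdyld-check: decode_operand(foo, 0) = 4
+  ///
+  /// passes everything after the prefix to check(). Here `foo` is a
+  /// placeholder symbol name.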
+ bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+ /// \brief Returns the address of the requested section (or an error message
+ /// in the second element of the pair if the address cannot be found).
+ ///
+  /// If 'LocalAddress' is true, this returns the address of the section
+  /// within the linker's memory. If 'LocalAddress' is false, it returns the
+  /// address within the target process (i.e. the load address).
+ std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+ StringRef SectionName,
+ bool LocalAddress);
+
+ /// \brief If there is a section at the given local address, return its load
+ /// address, otherwise return none.
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;
+
+private:
+ std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
diff --git a/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h b/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h
new file mode 100644
index 0000000..d76e371
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -0,0 +1,193 @@
+//===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of a section-based memory manager used by
+// the MCJIT execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Memory.h"
+#include <cstdint>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+/// This is a simple memory manager which implements the methods called by
+/// the RuntimeDyld class to allocate memory for section-based loading of
+/// objects, usually those generated by the MCJIT execution engine.
+///
+/// This memory manager allocates all section memory as read-write. The
+/// RuntimeDyld will copy JITed section memory into these allocated blocks
+/// and perform any necessary linking and relocations.
+///
+/// Any client using this memory manager MUST ensure that section-specific
+/// page permissions have been applied before attempting to execute functions
+/// in the JITed object. Permissions can be applied either by calling
+/// MCJIT::finalizeObject or by calling SectionMemoryManager::finalizeMemory
+/// directly. Clients of MCJIT should call MCJIT::finalizeObject.
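+///
+/// A minimal usage sketch (illustrative only, not part of the upstream
+/// documentation): `Resolver` stands for a user-provided JITSymbolResolver
+/// and `Obj` for an object::ObjectFile to be loaded.
+///
+///   SectionMemoryManager MemMgr;
+///   RuntimeDyld Dyld(MemMgr, Resolver);
+///   Dyld.loadObject(Obj);
+///   Dyld.finalizeWithMemoryManagerLocking(); // applies section permissions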
+class SectionMemoryManager : public RTDyldMemoryManager {
+public:
+ /// This enum describes the various reasons to allocate pages from
+ /// allocateMappedMemory.
+ enum class AllocationPurpose {
+ Code,
+ ROData,
+ RWData,
+ };
+
+ /// Implementations of this interface are used by SectionMemoryManager to
+ /// request pages from the operating system.
+ class MemoryMapper {
+ public:
+ /// This method attempts to allocate \p NumBytes bytes of virtual memory for
+ /// \p Purpose. \p NearBlock may point to an existing allocation, in which
+ /// case an attempt is made to allocate more memory near the existing block.
+ /// The actual allocated address is not guaranteed to be near the requested
+ /// address. \p Flags is used to set the initial protection flags for the
+    /// block of memory. \p EC [out] returns an object describing any error
+ /// that occurs.
+ ///
+ /// This method may allocate more than the number of bytes requested. The
+ /// actual number of bytes allocated is indicated in the returned
+ /// MemoryBlock.
+ ///
+ /// The start of the allocated block must be aligned with the system
+ /// allocation granularity (64K on Windows, page size on Linux). If the
+ /// address following \p NearBlock is not so aligned, it will be rounded up
+ /// to the next allocation granularity boundary.
+ ///
+    /// \returns a non-null MemoryBlock if the function was successful,
+    /// otherwise a null MemoryBlock with \p EC describing the error.
+ virtual sys::MemoryBlock
+ allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes,
+ const sys::MemoryBlock *const NearBlock,
+ unsigned Flags, std::error_code &EC) = 0;
+
+ /// This method sets the protection flags for a block of memory to the state
+ /// specified by \p Flags. The behavior is not specified if the memory was
+ /// not allocated using the allocateMappedMemory method.
+ /// \p Block describes the memory block to be protected.
+ /// \p Flags specifies the new protection state to be assigned to the block.
+ ///
+ /// If \p Flags is MF_WRITE, the actual behavior varies with the operating
+ /// system (i.e. MF_READ | MF_WRITE on Windows) and the target architecture
+ /// (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
+ ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+ virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+ unsigned Flags) = 0;
+
+ /// This method releases a block of memory that was allocated with the
+ /// allocateMappedMemory method. It should not be used to release any memory
+ /// block allocated any other way.
+ /// \p Block describes the memory to be released.
+ ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+    virtual std::error_code releaseMappedMemory(sys::MemoryBlock &Block) = 0;
+
+ virtual ~MemoryMapper();
+ };
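+
+  // Illustrative sketch (not part of the upstream interface): a custom mapper
+  // is installed by passing its address to the constructor below; `MyMapper`
+  // is a placeholder for a concrete MemoryMapper implementation.
+  //
+  //   MyMapper Mapper;
+  //   SectionMemoryManager MemMgr(&Mapper);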
+
+ /// Creates a SectionMemoryManager instance with \p MM as the associated
+ /// memory mapper. If \p MM is nullptr then a default memory mapper is used
+ /// that directly calls into the operating system.
+ SectionMemoryManager(MemoryMapper *MM = nullptr);
+ SectionMemoryManager(const SectionMemoryManager &) = delete;
+ void operator=(const SectionMemoryManager &) = delete;
+ ~SectionMemoryManager() override;
+
+ /// \brief Allocates a memory block of (at least) the given size suitable for
+ /// executable code.
+ ///
+ /// The value of \p Alignment must be a power of two. If \p Alignment is zero
+ /// a default alignment of 16 will be used.
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+ /// \brief Allocates a memory block of (at least) the given size suitable for
+  /// data.
+ ///
+ /// The value of \p Alignment must be a power of two. If \p Alignment is zero
+ /// a default alignment of 16 will be used.
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ /// \brief Update section-specific memory permissions and other attributes.
+ ///
+ /// This method is called when object loading is complete and section page
+ /// permissions can be applied. It is up to the memory manager implementation
+ /// to decide whether or not to act on this method. The memory manager will
+ /// typically allocate all sections as read-write and then apply specific
+ /// permissions when this method is called. Code sections cannot be executed
+ /// until this function has been called. In addition, any cache coherency
+ /// operations needed to reliably use the memory are also performed.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override;
+
+ /// \brief Invalidate instruction cache for code sections.
+ ///
+ /// Some platforms with separate data cache and instruction cache require
+ /// explicit cache flush, otherwise JIT code manipulations (like resolved
+ /// relocations) will get to the data cache but not to the instruction cache.
+ ///
+ /// This method is called from finalizeMemory.
+ virtual void invalidateInstructionCache();
+
+private:
+ struct FreeMemBlock {
+ // The actual block of free memory
+ sys::MemoryBlock Free;
+    // If there is a pending allocation from the same reservation right before
+    // this block, store its index in PendingMem, to be able to update the
+    // pending region if part of this block is allocated, rather than having to
+    // create a new one.
+ unsigned PendingPrefixIndex;
+ };
+
+ struct MemoryGroup {
+ // PendingMem contains all blocks of memory (subblocks of AllocatedMem)
+ // which have not yet had their permissions applied, but have been given
+    // out to the user. FreeMem contains all blocks of memory which have
+    // neither had their permissions applied nor been given out to the user.
+ SmallVector<sys::MemoryBlock, 16> PendingMem;
+ SmallVector<FreeMemBlock, 16> FreeMem;
+
+ // All memory blocks that have been requested from the system
+ SmallVector<sys::MemoryBlock, 16> AllocatedMem;
+
+ sys::MemoryBlock Near;
+ };
+
+ uint8_t *allocateSection(AllocationPurpose Purpose, uintptr_t Size,
+ unsigned Alignment);
+
+ std::error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions);
+
+ MemoryGroup CodeMem;
+ MemoryGroup RWDataMem;
+ MemoryGroup RODataMem;
+ MemoryMapper &MMapper;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H