/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_SIMPLE_ORC_JIT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_SIMPLE_ORC_JIT_H_

#include <memory>
#include <string>
#include <vector>

#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
#include "llvm/ExecutionEngine/Orc/TargetProcessControl.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/xla/service/cpu/compiler_functor.h"
#include "tensorflow/compiler/xla/types.h"

namespace xla {
namespace cpu {

// Simplified LLVM JIT based on the new Orc API.
//
// This class wraps Orc's functionality into a single interface that only
// exposes what we need for XLA.
//
// Supports JIT-ing multiple modules but without cross-module linking.
// Implements eager compilation - the module is lowered to binary as soon as
// it's added to the JIT.
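//
// Example usage (a rough sketch, not part of this header; the
// `thread_safe_module` value and the "entry_computation" symbol name below
// are hypothetical placeholders):
//
//   llvm::Expected<std::unique_ptr<SimpleOrcJIT>> jit = SimpleOrcJIT::Create(
//       llvm::TargetOptions(), llvm::CodeGenOpt::Default,
//       /*optimize_for_size=*/false, /*disable_expensive_passes=*/false,
//       llvm::FastMathFlags(), /*pre_optimization_hook=*/nullptr,
//       /*post_optimization_hook=*/nullptr, /*post_codegen_hook=*/nullptr);
//   if (jit) {
//     llvm::cantFail((*jit)->AddModule(std::move(thread_safe_module)));
//     auto entry = (*jit)->FindCompiledSymbol("entry_computation");
//   }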
class SimpleOrcJIT : public llvm::JITEventListener {
 public:
  using ObjLayerT = llvm::orc::RTDyldObjectLinkingLayer;
  using CompileLayerT = llvm::orc::IRCompileLayer;

  // Create a new JIT, targeting the host architecture.
  //
  // {pre,post}_optimization_hook is invoked on the module before/after all
  // LLVM IR-level optimizations.  post_codegen_hook is invoked after
  // compiling to machine code.
  SimpleOrcJIT(
      std::unique_ptr<llvm::orc::TargetProcessControl> target_process_control,
      std::unique_ptr<llvm::orc::ExecutionSession> execution_session,
      const llvm::TargetOptions& target_options,
      llvm::CodeGenOpt::Level opt_level, bool optimize_for_size,
      bool disable_expensive_passes, llvm::FastMathFlags fast_math_flags,
      LLVMCompiler::ModuleHook pre_optimization_hook,
      LLVMCompiler::ModuleHook post_optimization_hook,
      std::function<void(const llvm::object::ObjectFile&)> post_codegen_hook);

  static llvm::Expected<std::unique_ptr<SimpleOrcJIT>> Create(
      const llvm::TargetOptions& target_options,
      llvm::CodeGenOpt::Level opt_level, bool optimize_for_size,
      bool disable_expensive_passes, llvm::FastMathFlags fast_math_flags,
      LLVMCompiler::ModuleHook pre_optimization_hook,
      LLVMCompiler::ModuleHook post_optimization_hook,
      std::function<void(const llvm::object::ObjectFile&)> post_codegen_hook);
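
  // Example of a module hook (a sketch; assumes LLVMCompiler::ModuleHook is
  // callable with a const llvm::Module&, and the IR dump is illustrative):
  //
  //   LLVMCompiler::ModuleHook dump_ir = [](const llvm::Module& module) {
  //     module.print(llvm::errs(), /*AAW=*/nullptr);
  //   };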

  ~SimpleOrcJIT() override;

  const llvm::DataLayout& data_layout() const { return data_layout_; }

  const llvm::Triple& target_triple() const {
    return target_machine_->getTargetTriple();
  }

  llvm::Error AddModule(llvm::orc::ThreadSafeModule module);

  // Get the runtime address of the compiled symbol whose name is given.
  // Returns an error if the symbol cannot be found.
  llvm::Expected<llvm::JITEvaluatedSymbol> FindCompiledSymbol(
      const std::string& name);

  llvm::TargetMachine* target_machine() const { return target_machine_.get(); }

  // Creates an llvm::TargetMachine suitable for JITting code that will run on
  // the current machine.
  static std::unique_ptr<llvm::TargetMachine> InferTargetMachineForJIT(
      const llvm::TargetOptions& target_options,
      llvm::CodeGenOpt::Level opt_level);
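
  // For example (a sketch; the default-constructed options and opt level
  // below are illustrative, not a recommendation):
  //
  //   std::unique_ptr<llvm::TargetMachine> tm =
  //       SimpleOrcJIT::InferTargetMachineForJIT(llvm::TargetOptions(),
  //                                              llvm::CodeGenOpt::Default);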

  int64 SizeOfGeneratedCodeInBytes() const {
    return size_of_generated_code_in_bytes_;
  }

 private:
  llvm::JITEvaluatedSymbol ResolveRuntimeSymbol(llvm::StringRef name);

  void notifyObjectLoaded(
      llvm::JITEventListener::ObjectKey key,
      const llvm::object::ObjectFile& object,
      const llvm::RuntimeDyld::LoadedObjectInfo& object_info) override;
  void notifyFreeingObject(llvm::JITEventListener::ObjectKey key) override;

  std::unique_ptr<llvm::TargetMachine> target_machine_;
  const llvm::DataLayout data_layout_;
  std::unique_ptr<llvm::orc::TargetProcessControl> target_process_control_;
  std::unique_ptr<llvm::orc::ExecutionSession> execution_session_;
  ObjLayerT object_layer_;
  CompileLayerT compile_layer_;
  llvm::orc::JITDylib* main_jit_dylib_;
  int64 size_of_generated_code_in_bytes_ = 0;

  // Non-owning pointer to a JIT event listener that registers the JIT events
  // with an attached GDB.
  //
  // Note: we get a pointer to this event listener using
  // `createGDBRegistrationListener`, which makes it look like we're supposed
  // to free this, but the function is poorly named and really just returns a
  // pointer to a static object.
  llvm::JITEventListener* gdb_jit_event_listener_;
};

}  // namespace cpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_SIMPLE_ORC_JIT_H_