/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_CPU_COMPILER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_CPU_COMPILER_H_

#include <memory>

#include "absl/types/span.h"
#include "llvm/Target/TargetMachine.h"
#include "tensorflow/compiler/tf2xla/cpu_function_runtime.h"
#include "tensorflow/compiler/xla/service/cpu/target_machine_features.h"
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/llvm_compiler.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"

namespace xla {
namespace cpu {

35 // This class wraps the configurability options that LLVM exposes including: the
36 // target triple, the target cpu and the target features.  It also includes the
37 // desired linkage name for the computation entry point.
38 class CpuAotCompilationOptions : public AotCompilationOptions {
39  public:
40   // Relocation models available for compilation.
41   enum class RelocationModel {
42     // Corresponds to the -fno-pic compiler option.
43     Static,
44     // Corresponds to the -fpic compiler option.
45     SmallPic,
46     // Corresponds to the -fPIC compiler option.
47     BigPic,
48     // Corresponds to the -fpie compiler option.
49     SmallPie,
50     // Corresponds to the -fPIE compiler option.
51     BigPie
52   };
53 
54   CpuAotCompilationOptions(string triple, string cpu_name, string features,
55                            string entry_point_name,
56                            RelocationModel relocation_model);
57   ~CpuAotCompilationOptions() override;
58 
59   se::Platform::Id PlatformId() const override;
60 
61   // The triple used for compilation, similar to clang's -target flag.
triple()62   const string& triple() const { return triple_; }
63   // The CPU name used for compilation, similar to clang's -mcpu flag.
cpu_name()64   const string& cpu_name() const { return cpu_name_; }
65   // The target features used for compilation ("+avx2", "+neon", etc).
features()66   const string& features() const { return features_; }
67   // The name to be used for the compiled code's entry point.
entry_point_name()68   const string& entry_point_name() const { return entry_point_name_; }
69   // The relocation model used for compilation.
relocation_model()70   RelocationModel relocation_model() const { return relocation_model_; }
71 
72  private:
73   const string triple_;
74   const string cpu_name_;
75   const string features_;
76   const string entry_point_name_;
77   const RelocationModel relocation_model_;
78 };
79 
80 class CpuAotCompilationResult : public AotCompilationResult {
81  public:
82   CpuAotCompilationResult(
83       ObjectFileData object_file_data,
84       std::vector<::tensorflow::cpu_function_runtime::BufferInfo> buffer_infos,
85       int64 result_buffer_index,
86       std::unique_ptr<HloProfilePrinterData> hlo_profile_printer_data);
87   ~CpuAotCompilationResult();
88 
hlo_profile_printer_data()89   HloProfilePrinterData* hlo_profile_printer_data() const {
90     return hlo_profile_printer_data_.get();
91   }
92 
object_file_data()93   const ObjectFileData& object_file_data() const { return object_file_data_; }
94   const std::vector<::tensorflow::cpu_function_runtime::BufferInfo>&
buffer_infos()95   buffer_infos() const {
96     return buffer_infos_;
97   }
result_buffer_index()98   int64 result_buffer_index() const { return result_buffer_index_; }
99 
100  private:
101   // Contains the compiled computation: an object file.
102   const ObjectFileData object_file_data_;
103 
104   // A list of BufferInfo objects describing the buffers used by the XLA
105   // computation.
106   const std::vector<::tensorflow::cpu_function_runtime::BufferInfo>
107       buffer_infos_;
108 
109   // Contains which buffer index into |buffer_sizes| was designated to the
110   // result of the computation.  This buffer should be passed into the output
111   // parameter when calling the compiled computation.
112   const int64 result_buffer_index_;
113 
114   // Contains an instance of HloProfilePrinterData if HLO profiling is enabled,
115   // otherwise is nullptr.
116   std::unique_ptr<HloProfilePrinterData> hlo_profile_printer_data_;
117 };
118 
119 // CPU-targeting implementation of the XLA Compiler interface.
120 //
121 // The compiler translates XLA HLO code into LLVM IR and uses LLVM's JIT
122 // infrastructure to create an executable "blob" that can then be returned
123 // wrapped in CpuExecutable and actually invoked.
124 class CpuCompiler : public LLVMCompiler {
125  public:
126   CpuCompiler();
~CpuCompiler()127   ~CpuCompiler() override {}
128 
129   // Bring in
130   // StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
131   //     std::vector<std::unique_ptr<HloModule>> modules,
132   //     std::vector<std::vector<se::StreamExecutor*>>
133   //        stream_execs)
134   using LLVMCompiler::Compile;
135 
136   StatusOr<std::unique_ptr<HloModule>> RunHloPasses(
137       std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
138       DeviceMemoryAllocator* device_allocator) override;
139 
140   StatusOr<std::unique_ptr<Executable>> RunBackend(
141       std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
142       DeviceMemoryAllocator* device_allocator) override;
143 
144   StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
145   CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
146                      const AotCompilationOptions& options) override;
147 
148   se::Platform::Id PlatformId() const override;
149 
150   HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override;
151 
152  private:
153   // Initialize the LLVM target.
154   static void InitializeLLVMTarget();
155 
156   // Runs the HLO passes which are necessary for both optimizations and
157   // correctness.
158   Status RunHloPasses(HloModule* module, bool is_aot_compile,
159                       llvm::TargetMachine* target_machine);
160 
161   // Runs HLO passes up to and including layout assignment.
162   Status RunHloPassesThroughLayoutAssn(
163       HloModule* module, bool /*is_aot_compile*/,
164       LLVMTargetMachineFeatures* target_machine_features);
165 
166   // Runs HLO passes after layout assignment.
167   Status RunHloPassesAfterLayoutAssn(
168       HloModule* module, bool is_aot_compile,
169       LLVMTargetMachineFeatures* target_machine_features);
170 
171   TF_DISALLOW_COPY_AND_ASSIGN(CpuCompiler);
172 };
}  // namespace cpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_CPU_COMPILER_H_