/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/compiler.h"

#include <string>
#include <utility>

#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"

namespace xla {

/* static */ tensorflow::mutex Compiler::platform_compiler_mutex_(
    tensorflow::LINKER_INITIALIZED);

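// Default implementation for backends that do not produce per-instruction
// backend configs: returns an empty list of candidate configurations.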
std::vector<std::unique_ptr<tensorflow::protobuf::Message>>
Compiler::ComputeBackendConfigs(const HloInstruction& hlo,
                                se::StreamExecutor* executor) const {
  CHECK(executor != nullptr);
  return {};
}

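// Default implementation for backends that do not attach a default backend
// config to instructions: returns nullptr (no config).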
std::unique_ptr<tensorflow::protobuf::Message>
Compiler::ComputeDefaultBackendConfig(const HloInstruction& hlo,
                                      se::StreamExecutor* executor) const {
  CHECK(executor != nullptr);
  return nullptr;
}

// Define a default version where metadata is not used.
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
Compiler::CompileAheadOfTime(
    std::unique_ptr<HloModuleGroup> module_group,
    const AotCompilationOptions& options,
    std::unique_ptr<AotCompilationMetadata>* metadata) {
  if (metadata != nullptr) {
    return Unimplemented(
        "Populating AotCompilationMetadata is not implemented on this "
        "compiler.");
  }
  return CompileAheadOfTime(std::move(module_group), options);
}

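// Lazily constructed map from platform id to the factory that creates
// compilers for that platform. Heap-allocated and never destroyed, so it is
// safe to touch during static initialization and shutdown.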
/* static */ std::map<se::Platform::Id, Compiler::CompilerFactory>*
Compiler::GetPlatformCompilerFactories() {
  static auto* r = new std::map<se::Platform::Id, CompilerFactory>;
  return r;
}

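// Lazily constructed map from platform id to the compiler instance already
// created for that platform; at most one compiler is instantiated per
// platform.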
/* static */
std::map<se::Platform::Id, std::unique_ptr<Compiler>>*
Compiler::GetPlatformCompilers() {
  static auto* r = new std::map<se::Platform::Id, std::unique_ptr<Compiler>>;
  return r;
}

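// Registers a factory for the given platform id; CHECK-fails if one is
// already registered. Backends typically register themselves at
// static-initialization time, roughly like this (a sketch; the exact
// platform id and compiler type depend on the backend):
//
//   static bool InitModule() {
//     xla::Compiler::RegisterCompilerFactory(
//         stream_executor::host::kHostPlatformId,
//         []() { return absl::make_unique<xla::cpu::CpuCompiler>(); });
//     return true;
//   }
//   static bool module_initialized = InitModule();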
/* static */ void Compiler::RegisterCompilerFactory(
    se::Platform::Id platform_id,
    std::function<std::unique_ptr<Compiler>()> compiler_factory) {
  tensorflow::mutex_lock lock(platform_compiler_mutex_);
  auto* factories = GetPlatformCompilerFactories();
  CHECK(factories->find(platform_id) == factories->end())
      << "Compiler factory already registered for platform";
  (*factories)[platform_id] = std::move(compiler_factory);
}

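// Returns the compiler for `platform`, creating it via the registered
// factory on first use and caching it thereafter, so every caller shares a
// single compiler instance per platform.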
/* static */ StatusOr<Compiler*> Compiler::GetForPlatform(
    const se::Platform* platform) {
  tensorflow::mutex_lock lock(platform_compiler_mutex_);

  auto* compilers = GetPlatformCompilers();
  // See if we already instantiated a compiler for this platform.
  {
    auto it = compilers->find(platform->id());
    if (it != compilers->end()) {
      return it->second.get();
    }

    // If not, we just fall through to try to create one with a registered
    // factory.
  }

  auto* factories = GetPlatformCompilerFactories();
  auto it = factories->find(platform->id());
  if (it == factories->end()) {
    string hint;
    if (platform->Name() == "Host") {
      hint =
          " (hint: try adding tensorflow/compiler/jit:xla_cpu_jit as a "
          "dependency)";
    } else if (platform->Name() == "CUDA") {
      hint =
          " (hint: try adding tensorflow/compiler/jit:xla_gpu_jit as a "
          "dependency)";
    }

    return NotFound(
        "could not find registered compiler for platform %s -- check "
        "target linkage%s",
        platform->Name(), hint);
  }

  // And then we invoke the factory, placing the result into the mapping.
  compilers->insert(std::make_pair(platform->id(), it->second()));
  return compilers->at(platform->id()).get();
}

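// By default, pick up the DebugOptions parsed from the command-line flags.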
AotCompilationOptions::AotCompilationOptions()
    : debug_options_(GetDebugOptionsFromFlags()) {}

}  // namespace xla