/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_COMPILE_ONLY_SERVICE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_COMPILE_ONLY_SERVICE_H_

#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/compiler.h"
#include "tensorflow/compiler/xla/service/service.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {

28 // An XLA Service specialization for ahead-of-time compilation.  This only
29 // instantiates a Compiler object for the relevant platform; it does not
30 // instantiate or require an execution backend.
31 class CompileOnlyService : public Service {
32  public:
33   // Factory for creating a CompileOnlyService. The parameter platform is the
34   // platform that the service should target. If platform is null then the
35   // default platform is used.
36   static StatusOr<std::unique_ptr<CompileOnlyService>> NewService(
37       perftools::gputools::Platform* platform);
38   static StatusOr<std::unique_ptr<CompileOnlyService>> NewService(
39       const ServiceOptions& options);
40 
41   // A description of a computation to compile using CompileAheadOfTime.
42   struct AotComputationInstance {
43     ComputationHandle computation;
44     std::vector<const Shape*> argument_layouts;
45     const Shape* result_layout = nullptr;
46   };
47 
48   // Compiles a list of computations for ahead-of-time execution.  This is
49   // intended for use in static compilation.  See
50   // |CompileOnlyClient::CompileAheadOfTime| for additional details.
51   StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
52   CompileAheadOfTime(
53       const tensorflow::gtl::ArraySlice<AotComputationInstance> computations,
54       const AotCompilationOptions& Options);
55 
56   // Override Service methods that require or imply the existence of an
57   // execute backend.  Note that this does not include TransferToClient, as
58   // computing constants produces global data that we may wish to transfer.
Execute(const ExecuteRequest * arg,ExecuteResponse * result)59   tensorflow::Status Execute(const ExecuteRequest* arg,
60                              ExecuteResponse* result) override {
61     return Unimplemented("CompileOnlyService does not support execution.");
62   }
ExecuteParallel(const ExecuteParallelRequest * arg,ExecuteParallelResponse * result)63   tensorflow::Status ExecuteParallel(const ExecuteParallelRequest* arg,
64                                      ExecuteParallelResponse* result) override {
65     return Unimplemented("CompileOnlyService does not support execution.");
66   }
GetDeviceHandles(const GetDeviceHandlesRequest * arg,GetDeviceHandlesResponse * result)67   tensorflow::Status GetDeviceHandles(
68       const GetDeviceHandlesRequest* arg,
69       GetDeviceHandlesResponse* result) override {
70     return Unimplemented("CompileOnlyService does not support devices.");
71   }
ExecuteAsync(const ExecuteAsyncRequest * arg,ExecuteAsyncResponse * result)72   tensorflow::Status ExecuteAsync(const ExecuteAsyncRequest* arg,
73                                   ExecuteAsyncResponse* result) override {
74     return Unimplemented("CompileOnlyService does not support execution.");
75   }
WaitForExecution(const WaitForExecutionRequest * arg,WaitForExecutionResponse * result)76   tensorflow::Status WaitForExecution(
77       const WaitForExecutionRequest* arg,
78       WaitForExecutionResponse* result) override {
79     return Unimplemented("CompileOnlyService does not support execution.");
80   }
TransferToServer(const TransferToServerRequest * arg,TransferToServerResponse * result)81   tensorflow::Status TransferToServer(
82       const TransferToServerRequest* arg,
83       TransferToServerResponse* result) override {
84     return Unimplemented(
85         "CompileOnlyService does not support device data transfers.");
86   }
TransferToInfeed(const TransferToInfeedRequest * arg,TransferToInfeedResponse * result)87   tensorflow::Status TransferToInfeed(
88       const TransferToInfeedRequest* arg,
89       TransferToInfeedResponse* result) override {
90     return Unimplemented(
91         "CompileOnlyService does not support device data transfers.");
92   }
TransferFromOutfeed(const TransferFromOutfeedRequest * arg,TransferFromOutfeedResponse * result)93   tensorflow::Status TransferFromOutfeed(
94       const TransferFromOutfeedRequest* arg,
95       TransferFromOutfeedResponse* result) override {
96     return Unimplemented(
97         "CompileOnlyService does not support device data transfers.");
98   }
ResetDevice(const ResetDeviceRequest * arg,ResetDeviceResponse * result)99   tensorflow::Status ResetDevice(const ResetDeviceRequest* arg,
100                                  ResetDeviceResponse* result) override {
101     return Unimplemented("CompileOnlyService does not support devices.");
102   }
103 
104  private:
105   explicit CompileOnlyService(const ServiceOptions& options,
106                               Compiler* compiler);
107   CompileOnlyService(const CompileOnlyService&) = delete;
108   void operator=(const CompileOnlyService&) = delete;
109 
110   // The compiler for the target platform.  This is included in place of
111   // the Service::execute_backend_'s compiler, since execute_backend_ is a
112   // nullptr in CompileOnlyService.
113   Compiler* compiler_;
114 };

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_COMPILE_ONLY_SERVICE_H_