/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// XLA service API.
//
// Users 1) build up computations and 2) create allocations via this API.
// Computations are composed of data flowing between arbitrarily-sized
// vector-oriented operations.
//
// Users build up computations using a ComputationHandle, and talk about
// allocations using GlobalDataHandles.
//
// There are currently no checkpointing capabilities or distribution/replication
// guarantees. The service runs on a single machine (e.g. one task) and that is
// its failure domain.
//
// Canonical example of "alpha * X + Y":
// * Make a computation.
// * Add alpha and X and Y as parameters.
// * Request the multiplication of alpha and X.
// * Request the addition of that result and Y.
//
// Then, pass the computation and appropriately shaped inputs to the XLA
// service's Execute method, which provides a result as a GlobalDataHandle.
//
// All data in XLA computations are conceptually immutable.
//
// Note: this API is subject to change / refinement over time -- use the
// provided client libraries to insulate code from changes to this service API.
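//
// As an illustrative sketch (not a literal wire transcript; in practice the
// provided client libraries issue these RPCs on the caller's behalf), the
// "alpha * X + Y" example above maps onto this service roughly as follows:
//
//   TransferToServer(alpha), TransferToServer(X), TransferToServer(Y)
//                                  --> one GlobalDataHandle per operand
//   Compile(computation)           --> a handle to the compiled executable
//   Execute(executable, operands)  --> a GlobalDataHandle for alpha * X + Y
//   TransferToClient(result)       --> a Literal holding the output values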

syntax = "proto3";

import "tensorflow/compiler/xla/xla.proto";

package xla;

service XlaService {
  /////////////////////////
  // Global data requests

  // Unregisters a global allocation.
  //
  // If the handle given is not currently allocated, a NOT_FOUND status is
  // returned.
  rpc Unregister(UnregisterRequest) returns (UnregisterResponse) {
  }

  // Deconstructs a tuple. Returns a newly created GlobalDataHandle for each
  // element in the tuple.
  rpc DeconstructTuple(DeconstructTupleRequest)
      returns (DeconstructTupleResponse) {
  }

  // Unpack requests that a global data handle, with a tuple shape, have global
  // data handles created for each of its constituent members. This is the
  // equivalent of the "destructuring assignment" present in various programming
  // languages.
  rpc Unpack(UnpackRequest) returns (UnpackResponse) {
  }

  // Requests the shape of the referenced global data.
  rpc GetShape(GetShapeRequest) returns (GetShapeResponse) {
  }

  // Requests the statistics of the given computation.
  rpc GetComputationGraphStats(ComputationGraphStatsRequest)
      returns (ComputationStatsResponse) {
  }

  // Loads a variable number of values with a given element type from ColumnIO.
  rpc LoadData(LoadDataRequest) returns (LoadDataResponse) {
  }

  // Transfers the given global data to the client in the form of a Literal.
  rpc TransferToClient(TransferToClientRequest)
      returns (TransferToClientResponse) {
  }

  // Transfers the given literal to the server to be stored in a global
  // allocation, which is returned.
  rpc TransferToServer(TransferToServerRequest)
      returns (TransferToServerResponse) {
  }

  // Transfers the given literal to the Infeed buffer of the device.
  rpc TransferToInfeed(TransferToInfeedRequest)
      returns (TransferToInfeedResponse) {
  }

  // Transfers a literal from the Outfeed buffer of the device.
  rpc TransferFromOutfeed(TransferFromOutfeedRequest)
      returns (TransferFromOutfeedResponse) {
  }

  // Resets the device, clearing all existing state on the device.
  rpc ResetDevice(ResetDeviceRequest) returns (ResetDeviceResponse) {
  }

  // Computes the value of a constant expression. The request contains the
  // computation graph for the constant expression.
  rpc ComputeConstantGraph(ComputeConstantGraphRequest)
      returns (ComputeConstantResponse) {
  }

  // Requests one or more device handles from the target. The returned device
  // handles can be used to specify the device on which to execute computations
  // or transfer data.
  rpc GetDeviceHandles(GetDeviceHandlesRequest)
      returns (GetDeviceHandlesResponse) {
  }
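  //
  // For instance (an illustrative sketch, not a prescribed call sequence): a
  // client might call GetDeviceHandles once up front and then reference one of
  // the returned handles in subsequent TransferToServer and Execute requests,
  // so that both the data and the computation are placed on that device.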

  // Creates a channel handle that can be used to transfer data between
  // two computations via a pair of Send and Recv instructions.
  rpc CreateChannelHandle(CreateChannelHandleRequest)
      returns (CreateChannelHandleResponse) {
  }

  // Compiles the provided computation into an executable. Returns the handle
  // of the executable.
  rpc Compile(CompileRequest) returns (CompileResponse) {}

  // Invokes the provided executable with the provided global data passed as
  // immutable arguments. The request contains the handle to the executable.
  // Returns global data output and execution timing.
  rpc Execute(ExecuteRequest) returns (ExecuteResponse) {}

  // Invokes the provided list of computations in parallel with the provided
  // global data for each computation. Returns a list of global data output and
  // execution timing.
  rpc ExecuteGraphParallel(ExecuteGraphParallelRequest)
      returns (ExecuteParallelResponse) {
  }

  // Waits until the given execution (asynchronously launched) is complete, and
  // returns the global data output.
  rpc WaitForExecution(WaitForExecutionRequest)
      returns (WaitForExecutionResponse) {
  }
}