/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

syntax = "proto3";

package tensorflow;

import "tensorflow/compiler/xla/service/hlo.proto";
import "tensorflow/compiler/xla/xla_data.proto";
import "tensorflow/core/framework/tensor_shape.proto";

// A serialization of TPUExecutable. Only includes fields necessary to load
// and execute a program on a worker node.
message TPUExecutableInfoProto {
  reserved 1;

  // The shapes of the inputs and outputs.
  repeated xla.ShapeProto input_shapes = 2;
  reserved 7;  // was input_shape
  xla.ShapeProto output_shape = 3;
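
  // For a resource variable output, the index of the corresponding input and
  // whether that variable was updated; see variable_indices below.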
  message UpdateIndexPair {
    int32 index = 1;
    bool updated = 2;
  }
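
  // A path selecting one element of a (possibly nested) tuple-shaped output,
  // analogous to xla::ShapeIndex; used by dynamic_output_indices below.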
  message ShapeIndex {
    repeated int32 index = 1;
  }

  // Dynamic output indices indicate which outputs have dynamic dimensions.
  repeated ShapeIndex dynamic_output_indices = 11;

  // For each resource variable output, what was the index of the corresponding
  // input and was it updated? The indices are sorted by input order.
  repeated UpdateIndexPair variable_indices = 10;

  // The shapes of the outputs when represented as Tensors. These may not
  // match the output_shape values because we may flatten tensors to avoid
  // excess padding.
  repeated TensorShapeProto output_tensor_shapes = 8;

  reserved 4;

  // Optional session module for passing XLA computations between TPUCompileOp
  // and TPUExecuteOp. This is needed to support the
  // --xla_dump_hlo_snapshots flag.
  xla.HloSnapshot session_module = 5;

  // The physical device ids assigned to the replicated cores.
  xla.DeviceAssignmentProto device_assignment = 6;
}
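
// Illustrative sketch, not part of the original file: a textproto instance of
// TPUExecutableInfoProto for a hypothetical program with one f32[8,128] input
// backed by a resource variable that the program updates in place. All values
// below are made up for illustration.
//
//   input_shapes { element_type: F32 dimensions: [8, 128] }
//   output_shape { element_type: F32 dimensions: [8, 128] }
//   variable_indices { index: 0 updated: true }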

// Metadata for a data transfer between device and host.
message TPUHostTransferProto {
  enum TransferDirection {
    NONE = 0;
    DEVICE_TO_HOST = 1;
    HOST_TO_DEVICE = 2;
  }
  // Channel identifier assigned by the compiler and used in host commands.
  int64 channel = 1;
  // Direction of the transfer operation.
  TransferDirection direction = 2;
  // Channel identifier provided by the XLA client.
  string key = 3;
  // Depth of nested loops for this transfer operation.
  int64 nested_while_level = 4;
  // Shape of the data to be transferred (including layout).
  xla.ShapeProto shape = 5;
  // Address of the device buffer in HBM (byte offset).
  int64 buffer_offset = 6;
  // Original data type for this host transfer before the X64 rewrite.
  xla.PrimitiveType original_type = 7;
  // If this host transfer is a split X64 transfer, specifies whether this
  // transfer is for the lower bits.
  bool is_lower_bits = 8;
}
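
// Illustrative sketch, not part of the original file: a textproto instance of
// TPUHostTransferProto for a hypothetical device-to-host transfer of an
// f32[1024] buffer on channel 1; all values are made up.
//
//   channel: 1
//   direction: DEVICE_TO_HOST
//   key: "dtoh_channel_1"
//   nested_while_level: 0
//   shape { element_type: F32 dimensions: [1024] }
//   buffer_offset: 0

// The host transfers performed by a single compiled TPU program.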
message TPUHostTransferInfoProto {
  repeated TPUHostTransferProto host_transfers = 1;
}