/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/jit/shape_inference.h"

#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/util/dump_graph.h"

namespace tensorflow {

namespace {

// Converts a shape inference handle to a PartialTensorShape.
Status ShapeHandleToTensorShape(shape_inference::InferenceContext* context,
                                const shape_inference::ShapeHandle& handle,
                                PartialTensorShape* shape) {
  // The default is already unknown.
  if (!context->RankKnown(handle)) return Status::OK();

  std::vector<int64> dims(context->Rank(handle));
  for (int32 i = 0; i < dims.size(); ++i) {
    dims[i] = context->Value(context->Dim(handle, i));
  }
  return PartialTensorShape::MakePartialShape(dims.data(), dims.size(), shape);
}
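
// Illustrative sketch (hypothetical caller code, not used elsewhere in this
// file): a handle of known rank but partially unknown dimensions maps to a
// PartialTensorShape that uses -1 (printed as "?") for the unknown
// dimensions. Assuming `c` is an InferenceContext obtained from a
// ShapeRefiner:
//
//   shape_inference::ShapeHandle h = c->MakeShape({c->UnknownDim(), 3});
//   PartialTensorShape shape;
//   TF_CHECK_OK(ShapeHandleToTensorShape(c, h, &shape));
//   // shape.DebugString() == "[?,3]"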

Status PropagateShapes(const Graph& graph,
                       const std::map<int, InferredShape>& arg_shapes,
                       ShapeRefiner* shape_refiner) {
  // Visits the nodes in topological order (reverse post-order), inferring
  // shapes.
  // TODO(phawkins): handle cyclic graphs.
  std::vector<Node*> order;
  GetReversePostOrder(graph, &order);

  for (Node* n : order) {
    // Ignore the status returned by the shape_refiner. We want the best-effort
    // shapes, even if no shape function is registered for a node.
    Status status = shape_refiner->AddNode(n);
    if (!status.ok()) {
      VLOG(1) << "Shape inference failed for node " << n->name() << ": "
              << status;
    } else {
      shape_inference::InferenceContext* context = shape_refiner->GetContext(n);
      for (int i = 0; i < n->num_outputs(); i++) {
        shape_inference::ShapeHandle handle = context->output(i);
        VLOG(4) << "Output " << i << " for node " << n->name() << ": "
                << context->DebugString(handle);
      }
    }

    if (n->type_string() == "_Arg") {
      int index;
      TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
      auto it = arg_shapes.find(index);
      if (it != arg_shapes.end()) {
        const InferredShape& arg_shape = it->second;
        shape_inference::InferenceContext* context =
            shape_refiner->GetContext(n);

        if (arg_shape.handle_type != DT_INVALID) {
          shape_inference::ShapeHandle handle;
          TF_RETURN_IF_ERROR(context->MakeShapeFromPartialTensorShape(
              arg_shape.handle_shape, &handle));

          // Sets the shape and type of the variable's value.
          context->set_output_handle_shapes_and_types(
              0, std::vector<shape_inference::ShapeAndType>{
                     {handle, arg_shape.handle_type}});
        }

        shape_inference::ShapeHandle handle;
        TF_RETURN_IF_ERROR(
            context->MakeShapeFromPartialTensorShape(arg_shape.shape, &handle));
        TF_RETURN_IF_ERROR(shape_refiner->SetShape(n, 0, handle));
      }
    }
  }
  return Status::OK();
}
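
// Illustrative sketch (hypothetical caller code): arg_shapes seeds the shapes
// of the graph's _Arg nodes, keyed by their "index" attribute. For a graph
// whose argument 0 is a DT_RESOURCE handle to a float variable of shape [10],
// a caller might populate the map like this:
//
//   InferredShape arg0;
//   arg0.shape = PartialTensorShape();             // shape of the handle itself
//   arg0.handle_type = DT_FLOAT;                   // dtype of the variable's value
//   arg0.handle_shape = PartialTensorShape({10});  // shape of the value
//   std::map<int, InferredShape> arg_shapes = {{0, arg0}};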

// Stores the shapes of the output tensors in a map.
Status StoreOutputShapes(const Graph& graph, const ShapeRefiner& shape_refiner,
                         GraphShapeInfo* shape_info) {
  for (const Node* node : graph.nodes()) {
    shape_inference::InferenceContext* context = shape_refiner.GetContext(node);
    if (!context) continue;

    auto& outputs = (*shape_info)[node->name()];
    outputs.resize(context->num_outputs());
    for (int i = 0; i < context->num_outputs(); ++i) {
      auto& output = outputs[i];
      TF_RETURN_IF_ERROR(
          ShapeHandleToTensorShape(context, context->output(i), &output.shape));

      const auto* handle_shapes_and_types =
          context->output_handle_shapes_and_types(i);
      if (handle_shapes_and_types != nullptr) {
        if (handle_shapes_and_types->size() == 1) {
          TF_RETURN_IF_ERROR(ShapeHandleToTensorShape(
              context, (*handle_shapes_and_types)[0].shape,
              &output.handle_shape));
          output.handle_type = (*handle_shapes_and_types)[0].dtype;
        } else {
          // Otherwise, the output may be a resource such as a Queue, which can
          // have multiple shapes and types represented by a single handle.
        }
      }
      VLOG(4) << node->name() << " output " << i << " shape "
              << output.shape.DebugString() << " handle_type "
              << DataTypeString(output.handle_type) << " handle_shape "
              << output.handle_shape.DebugString();
    }
  }
  return Status::OK();
}
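
// Illustrative sketch (hypothetical caller code): as the loop above shows,
// GraphShapeInfo maps a node's name to one InferredShape per output, so the
// results can be read back by node name ("my_node" is a made-up example):
//
//   GraphShapeInfo shape_info;
//   TF_CHECK_OK(StoreOutputShapes(graph, shape_refiner, &shape_info));
//   auto it = shape_info.find("my_node");
//   if (it != shape_info.end() && !it->second.empty()) {
//     VLOG(1) << "my_node:0 " << it->second[0].shape.DebugString();
//   }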

}  // namespace

Status InferShapes(Graph* graph, const std::map<int, InferredShape>& arg_shapes,
                   const tensorflow::FunctionLibraryDefinition* fnlib_def,
                   GraphShapeInfo* shape_info) {
  ShapeRefiner shape_refiner(graph->versions(), graph->op_registry());
  shape_refiner.set_require_shape_inference_fns(false);
  // TODO(dlibenzi): Verify whether it is worth trying to infer shapes within
  // functions. Some functions can be called at multiple locations with
  // different shapes, which will trigger a shape inference based on the
  // arguments passed at the first call.
  // shape_refiner.set_function_library_for_shape_inference(fnlib_def);

  // ShapeRefiner requires that all inputs of a node are present when
  // ShapeRefiner::AddNode is called. To get at least some shape information in
  // loops, we temporarily remove loop backedges and add them back again after
  // the shape inference is complete.
  BackEdgeHelper back_edge;
  TF_RETURN_IF_ERROR(back_edge.Remove(graph));
  TF_RETURN_IF_ERROR(PropagateShapes(*graph, arg_shapes, &shape_refiner));
  TF_RETURN_IF_ERROR(back_edge.Replace());

  // Currently information does not flow "backward" from consumers to producers
  // in the shape inference, but we consume the shapes in a second pass in case
  // backward information flow is added in the future.
  return StoreOutputShapes(*graph, shape_refiner, shape_info);
}
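
// Illustrative sketch (hypothetical caller code; the graph setup is a made-up
// minimal example): a caller builds a Graph, seeds the _Arg shapes, and reads
// the results out of the GraphShapeInfo. fnlib_def is unused while the
// function-library line above stays commented out, so nullptr is acceptable
// here:
//
//   Graph graph(OpRegistry::Global());
//   // ... populate `graph`, e.g. from a GraphDef ...
//   std::map<int, InferredShape> arg_shapes;  // see PropagateShapes above
//   GraphShapeInfo shape_info;
//   TF_CHECK_OK(
//       InferShapes(&graph, arg_shapes, /*fnlib_def=*/nullptr, &shape_info));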

xla::StatusOr<InferredShape> MergeInferredShapes(const InferredShape& a,
                                                 const InferredShape& b) {
  InferredShape result;
  TF_RETURN_IF_ERROR(a.shape.MergeWith(b.shape, &result.shape));

  if (a.handle_type == DT_INVALID) {
    result.handle_type = b.handle_type;
  } else if (b.handle_type == DT_INVALID) {
    result.handle_type = a.handle_type;
  } else if (a.handle_type == b.handle_type) {
    result.handle_type = a.handle_type;
  } else {
    return errors::InvalidArgument(
        "Mismatched resource types: ", DataTypeString(a.handle_type), " vs. ",
        DataTypeString(b.handle_type));
  }
  TF_RETURN_IF_ERROR(
      a.handle_shape.MergeWith(b.handle_shape, &result.handle_shape));
  return result;
}
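
// Illustrative sketch (hypothetical caller code): PartialTensorShape::MergeWith
// keeps the dimensions known on either side, and a DT_INVALID handle_type
// defers to the other side's type:
//
//   InferredShape a, b;
//   a.shape = PartialTensorShape({-1, 28});  // first dimension unknown
//   b.shape = PartialTensorShape({4, -1});   // second dimension unknown
//   a.handle_type = DT_INVALID;
//   b.handle_type = DT_FLOAT;
//   auto merged = MergeInferredShapes(a, b);
//   // merged.ValueOrDie().shape is [4,28]; handle_type is DT_FLOAT.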

}  // namespace tensorflow