1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
17 #define TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
18 
19 #include <unordered_map>
20 #include <unordered_set>
21 #include <vector>
22 
23 #include "absl/container/flat_hash_map.h"
24 #include "tensorflow/core/framework/shape_inference.h"
25 #include "tensorflow/core/grappler/clusters/cluster.h"
26 #include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
27 #include "tensorflow/core/grappler/grappler_item.h"
28 
29 namespace tensorflow {
30 
31 namespace grappler {
32 
// Optional attributes that tell about node output information.
// We use these side information, if provided, for static shape inference
// and VirtualScheduler scheduling.
//
// NOTE: these are `inline constexpr` (C++17) so the header contributes a
// single definition across all translation units with guaranteed constant
// initialization. Plain `const char[]` at namespace scope would instead give
// every including TU its own internal-linkage copy (with distinct addresses),
// and `ABSL_CONST_INIT` is subsumed by `constexpr`.

// Switch op attribute as a vector of int that tells which branch the
// Switch output is taken on every round of execution.
// Used for scheduling ops after Switch correctly (e.g., While loop).
inline constexpr char kOutputSlots[] = "_output_slot_vector";

// Example:
// Assume a node has two outputs and iterated for three times. Then it has:
// _execution_count = 3
// _output_sizes_vector = [2, 2, 2]
// _output_dtype_vector.size = 6
// _output_shape_vector.size = 6

// If all the iterations have same output shapes, then
// _execution_count = 3
// _same_output_for_iterations = true
// _output_sizes_vector = [2]
// _output_dtype_vector.size = 2
// _output_shape_vector.size = 2

// How many times this node has been executed.
inline constexpr char kExecutionCount[] = "_execution_count";

// Records the output sizes for each round of execution.
inline constexpr char kOutputSizes[] = "_output_sizes_vector";

// The node has been scheduled multiple times with outputs that have the same
// shape.
inline constexpr char kOutputSame[] = "_same_output_for_iterations";

// Outputs DataType vector.
inline constexpr char kOutputTypes[] = "_output_dtype_vector";

// Outputs TensorShapeProto vector.
inline constexpr char kOutputShapes[] = "_output_shape_vector";
// Forward declarations of internal shape-inference helpers; used below only
// through pointers, so the full definitions are not needed in this header.
class SymbolicShapeRefiner;
class TopoQueue;
74 
// Infer OpInfo::TensorProperties for graph nodes inputs/outputs.
//
// Typical use case is to infer tensor properties from a graph before doing an
// optimization pass. Nodes modified during the optimization pass have to be
// invalidated, to prevent further incorrect optimizations based on wrong shape
// and data type properties.
class GraphProperties {
 public:
  // The item must outlive the properties.
  explicit GraphProperties(const GrapplerItem& item) : item_(item) {}

  // Infer the shapes through abstract interpretation. Feed information can be
  // incorrect so it should be discarded to ensure correctness of the analysis.
  // However, it can help infer shapes in the fanout of fed nodes (even though
  // the correctness of these shapes can't be guaranteed), so in some cases
  // (such as simulation or scheduling) it makes sense to keep these shapes.
  // aggressive_shape_inference option executes nodes on the host to identify
  // output values when possible and does other aggressive strategies.
  // Similar to assuming_valid_feeds, this may cause incorrectness in graph
  // analyses, but is useful for simulation or scheduling.
  // If include_input_tensor_values is true, the values of constant tensors
  // will be included in the input properties.
  // If include_output_tensor_values is true, the values of constant tensors
  // will be included in the output properties.
  Status InferStatically(bool assume_valid_feeds,
                         bool aggressive_shape_inference,
                         bool include_input_tensor_values,
                         bool include_output_tensor_values);
  // Convenience overload: `include_tensor_values` controls both the input and
  // the output tensor values at once.
  Status InferStatically(bool assume_valid_feeds,
                         bool aggressive_shape_inference,
                         bool include_tensor_values) {
    return InferStatically(
        assume_valid_feeds,
        /*aggressive_shape_inference=*/aggressive_shape_inference,
        /*include_input_tensor_values=*/include_tensor_values,
        /*include_output_tensor_values=*/include_tensor_values);
  }
  // Convenience overload: no aggressive shape inference, and constant tensor
  // values are included in both input and output properties.
  Status InferStatically(bool assume_valid_feeds) {
    return InferStatically(assume_valid_feeds,
                           /*aggressive_shape_inference=*/false,
                           /*include_tensor_values=*/true);
  }
  // Infer the shape by running the graph on the specified cluster and
  // recording the shapes of the processed tensors.
  Status InferDynamically(Cluster* cluster);
  // Extract the properties from a cost graph. For testing only since there is
  // no way to ensure that the cost graph matches the item.
  Status InferFromCostGraph(const CostGraphDef& cost_graph);

  // Stores `item_.graph` with the inferred output shapes to `output_graph_def`.
  Status AnnotateOutputShapes(GraphDef* output_graph_def,
                              bool allow_symbolic_shapes) const;

  // Convenience overload: symbolic shapes are not allowed in the annotation.
  Status AnnotateOutputShapes(GraphDef* output_graph_def) const {
    return AnnotateOutputShapes(output_graph_def, false);
  }

  // Return the properties of node inputs/outputs, including data types and
  // shapes. Note that the dimensions in the shapes can be negative. We use the
  // -1 value to denote that we don't know anything about a dimension. We use
  // values strictly less than -1 to encode symbolic dimensions: although we
  // don't know the actual value of the symbolic dimension, we know that all
  // the dimensions denoted by the same negative value are equal.
  bool HasInputProperties(const string& node_name) const;
  bool HasOutputProperties(const string& node_name) const;
  const std::vector<OpInfo::TensorProperties>& GetInputProperties(
      const string& node_name) const;
  const std::vector<OpInfo::TensorProperties>& GetOutputProperties(
      const string& node_name) const;

  // Invalidate input/output properties for nodes modified during a graph
  // optimization pass, to prevent potential optimizations based on incorrect
  // shape information.
  void ClearInputProperties(const string& node_name);
  void ClearOutputProperties(const string& node_name);
  // Returns true if we have *any* properties.
  bool has_properties() const {
    return !input_properties_.empty() || !output_properties_.empty();
  }

  // Returns true if `node_name` was recorded as having an output shape
  // incompatible between shape inference and annotation (see
  // incompatible_shape_nodes_ below).
  bool CheckShapeIncompatible(const string& node_name) const {
    return incompatible_shape_nodes_.find(node_name) !=
           incompatible_shape_nodes_.end();
  }

 private:
  // Relaxes shapes <shapes_and_types>, determined from an EnqueueV2 node, into
  // <*queue_shapes_and_types>.
  static Status RelaxEnqueueShapesAndMergeTypes(
      SymbolicShapeRefiner* shape_refiner, const NodeDef* qnode,
      const std::vector<shape_inference::ShapeAndType>& shapes_and_types,
      std::vector<shape_inference::ShapeAndType>* queue_shapes_and_types);

  // Update the shapes of the enqueue node, port them over to the corresponding
  // queue, and schedule the reprocessing of the queue if needed.
  static Status UpdateEnqueue(
      const NodeDef* enqueue_node,
      const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
          resource_handles,
      SymbolicShapeRefiner* shape_refiner, bool* new_shapes);

  // Update the shapes and types of the Queue node, if not set by an Enqueue
  // node.
  static Status UpdateQueue(const NodeDef* queue_node,
                            SymbolicShapeRefiner* shape_refiner,
                            bool* new_shapes);

  // Update the output shapes of a Merge node, and enqueue its fanout in
  // new_shapes if needed.
  Status UpdateMerge(SymbolicShapeRefiner* shape_refiner, const NodeDef* node,
                     bool* new_shapes) const;
  // Process the Enter node, and enqueue its fanout in new_shapes if needed.
  static Status UpdateEnter(SymbolicShapeRefiner* shape_refiner,
                            const NodeDef* node, bool* new_shapes);
  // Update the shapes for node 'n'. If output shapes for n have changed,
  // enqueue its fanout in 'new_shapes'.
  Status UpdateShapes(SymbolicShapeRefiner* shape_refiner,
                      const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
                          resource_handles,
                      const NodeDef* n, bool* new_shapes) const;
  // Propagate the shapes for the nodes enqueued in new_shapes and their
  // transitive fanout until a fixed point is reached.
  Status PropagateShapes(
      SymbolicShapeRefiner* shape_refiner, TopoQueue* new_shapes,
      const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
          resource_handles,
      int num_loops) const;

  // Data members.

  // The analyzed item; held by reference, so it must outlive this object (see
  // constructor comment).
  const GrapplerItem& item_;
  // Inferred per-node input properties, keyed by node name.
  absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>
      input_properties_;
  // Inferred per-node output properties, keyed by node name.
  absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>
      output_properties_;
  // NOTE(review): presumably the empty vector the Get*Properties accessors
  // return for unknown nodes — confirm against the .cc implementation.
  const std::vector<OpInfo::TensorProperties> missing_properties_;

  // Nodes with output shape incompatible between shape inference and
  // annotation.
  std::unordered_set<string> incompatible_shape_nodes_;
};
214 
// Helper function for GraphProperties.
// NOTE(review): definition not visible here — judging by the name and
// signature, reports whether `tensor_as_shape` (the tensor value of the
// tensor whose shape is `shape` and element type is `dtype`) is a fully
// defined integer vector or scalar in context `ic`; confirm against the .cc.
bool IsShapeFullyDefinedIntegerVectorOrScalar(
    shape_inference::InferenceContext* ic,
    const shape_inference::ShapeHandle& shape,
    const shape_inference::ShapeHandle& tensor_as_shape, const DataType& dtype);
220 
221 }  // end namespace grappler
222 }  // end namespace tensorflow
223 
224 #endif  // TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
225