/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace grappler {

namespace {

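// Fills `node` with a float Const NodeDef of the given shape, with values
// 0.0, 0.1, 0.2, ...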
void CreateConstOp(const string& name, std::initializer_list<int64> dims,
                   NodeDef* node) {
  Tensor tensor(DT_FLOAT, TensorShape(dims));
  for (int64 i = 0; i < tensor.NumElements(); ++i)
    tensor.flat<float>()(i) = i / 10.0f;
  TF_CHECK_OK(NodeDefBuilder(name, "Const")
                  .Attr("dtype", DT_FLOAT)
                  .Attr("value", tensor)
                  .Finalize(node));
}

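// Fills `node` with a 1-D int32 Const NodeDef holding `sizes`; used below for
// the shape inputs of the Conv2D gradient ops.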
void CreateConstSizesOp(const string& name, const std::vector<int32>& sizes,
                        NodeDef* node) {
  TensorShape shape;
  shape.AddDim(sizes.size());
  Tensor tensor(DT_INT32, shape);
  for (int64 i = 0; i < tensor.NumElements(); ++i)
    tensor.flat<int32>()(i) = sizes[i];
  TF_CHECK_OK(NodeDefBuilder(name, "Const")
                  .Attr("dtype", DT_INT32)
                  .Attr("value", tensor)
                  .Finalize(node));
}

// Helper method for converting shapes vector to TensorProperty.
OpInfo::TensorProperties ShapeToTensorProperty(const std::vector<int>& shapes,
                                               const DataType& data_type) {
  OpInfo::TensorProperties prop;
  prop.set_dtype(data_type);
  for (int shape : shapes) prop.mutable_shape()->add_dim()->set_size(shape);
  return prop;
}

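// Builds a small graph with Conv2D, Conv2DBackpropInput, and
// Conv2DBackpropFilter nodes and checks that BuildOpInfoWithoutDevice reports
// the expected number of inputs for each of them.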
TEST(UtilsTest, ConvOpInfo) {
  int batch = 32;
  int rows = 7;
  int cols = 9;
  int filter_rows = 3;
  int filter_cols = 3;
  int out_rows = 7;
  int out_cols = 9;
  int in_depth = 3;
  int out_depth = 5;
  int stride = 1;

  std::unordered_map<string, const NodeDef*> name_to_node;
  GraphDef graph;
  NodeDef* input = graph.add_node();
  name_to_node["input"] = input;
  CreateConstOp("input", {batch, rows, cols, in_depth}, input);
  NodeDef* filter = graph.add_node();
  name_to_node["filter"] = filter;
  CreateConstOp("filter", {filter_rows, filter_cols, in_depth, out_depth},
                filter);
  NodeDef* output_backprop = graph.add_node();
  name_to_node["output_backprop"] = output_backprop;
  CreateConstOp("output_backprop", {batch, out_rows, out_cols, out_depth},
                output_backprop);
  NodeDef* input_sizes = graph.add_node();
  name_to_node["input_sizes"] = input_sizes;
  CreateConstSizesOp("input_sizes",
                     std::vector<int32>({batch, rows, cols, in_depth}),
                     input_sizes);
  NodeDef* filter_sizes = graph.add_node();
  name_to_node["filter_sizes"] = filter_sizes;
  CreateConstSizesOp(
      "filter_sizes",
      std::vector<int32>({filter_rows, filter_cols, in_depth, out_depth}),
      filter_sizes);

  TensorShape paddings_shape({4, 2});
  Tensor paddings_tensor(DT_INT32, paddings_shape);
  for (int64 i = 0; i < paddings_tensor.NumElements(); ++i) {
    paddings_tensor.flat<int32>()(i) = 0;
  }
  TF_CHECK_OK(NodeDefBuilder("paddings", "Const")
                  .Attr("dtype", DT_INT32)
                  .Attr("value", paddings_tensor)
                  .Finalize(graph.add_node()));

  // Now add the convolution op and its two gradient ops.
  NodeDef* conv = graph.add_node();
  TF_CHECK_OK(NodeDefBuilder("conv2d", "Conv2D")
                  .Input("input", 0, DT_FLOAT)
                  .Input("filter", 0, DT_FLOAT)
                  .Attr("strides", {1, stride, stride, 1})
                  .Attr("padding", "SAME")
                  .Finalize(conv));

  NodeDef* conv_bp_in = graph.add_node();
  TF_CHECK_OK(NodeDefBuilder("conv2d_bp_in", "Conv2DBackpropInput")
                  .Input("input_sizes", 0, DT_INT32)
                  .Input("filter", 0, DT_FLOAT)
                  .Input("output_backprop", 0, DT_FLOAT)
                  .Attr("strides", {1, stride, stride, 1})
                  .Attr("padding", "SAME")
                  .Finalize(conv_bp_in));

  NodeDef* conv_bp_filter = graph.add_node();
  TF_CHECK_OK(NodeDefBuilder("conv2d_bp_filter", "Conv2DBackpropFilter")
                  .Input("input", 0, DT_FLOAT)
                  .Input("filter_sizes", 0, DT_INT32)
                  .Input("output_backprop", 0, DT_FLOAT)
                  .Attr("strides", {1, stride, stride, 1})
                  .Attr("padding", "SAME")
                  .Finalize(conv_bp_filter));

  for (const auto& node : graph.node()) {
    if (node.name().find("conv2d") != 0) {
      continue;
    }
    std::vector<OpInfo::TensorProperties> inputs;
    inputs.resize(node.input_size());
    OpInfo info = BuildOpInfoWithoutDevice(node, name_to_node, inputs);
    if (node.name() == "conv2d") {
      EXPECT_EQ(2, info.inputs_size());
    } else if (node.name() == "conv2d_bp_in") {
      EXPECT_EQ(3, info.inputs_size());
    } else if (node.name() == "conv2d_bp_filter") {
      EXPECT_EQ(3, info.inputs_size());
    }
  }
}

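// Control inputs (added here via ControlInput("constant")) should be skipped,
// so BuildOpInfoWithoutDevice reports no inputs for the "constfold" node.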
TEST(UtilsTest, TestSkipControlInput) {
  GraphDef graph;
  TF_CHECK_OK(NodeDefBuilder("constant", "Const")
                  .Attr("dtype", DT_INT32)
                  .Finalize(graph.add_node()));
  TF_CHECK_OK(NodeDefBuilder("constfold", "NoOp")
                  .ControlInput("constant")
                  .Finalize(graph.add_node()));

  std::unordered_map<string, const NodeDef*> name_to_node;
  for (const auto& node : graph.node()) {
    name_to_node[node.name()] = &node;
  }

  bool node_found = false;
  for (const auto& node : graph.node()) {
    if (node.name() == "constfold") {
      std::vector<OpInfo::TensorProperties> inputs;
      OpInfo info = BuildOpInfoWithoutDevice(node, name_to_node, inputs);
      node_found = true;
      EXPECT_EQ(0, info.inputs_size());
    }
  }
  EXPECT_TRUE(node_found);
}

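// CalculateTensorSize should return DataTypeSize(dtype) times the number of
// elements, treating an unknown rank as a scalar and an unknown dimension
// (-1) as size 1.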
TEST(UtilsTest, CalculateTensorSize) {
  // Test normal usage.
  EXPECT_EQ(DataTypeSize(DT_FLOAT) * 1,
            CalculateTensorSize(ShapeToTensorProperty({1}, DT_FLOAT)));
  EXPECT_EQ(DataTypeSize(DT_FLOAT) * 4 * 4,
            CalculateTensorSize(ShapeToTensorProperty({4, 4}, DT_FLOAT)));
  EXPECT_EQ(DataTypeSize(DT_HALF) * 10 * 10 * 10,
            CalculateTensorSize(ShapeToTensorProperty({10, 10, 10}, DT_HALF)));
  EXPECT_EQ(
      DataTypeSize(DT_FLOAT) * 100 * 7 * 8 * 99,
      CalculateTensorSize(ShapeToTensorProperty({100, 7, 8, 99}, DT_FLOAT)));

  // Test unknown rank: the tensor is assumed to be a scalar.
  OpInfo::TensorProperties t = ShapeToTensorProperty({100, 7, 8, 99}, DT_FLOAT);
  t.mutable_shape()->set_unknown_rank(true);
  EXPECT_EQ(DataTypeSize(DT_FLOAT) * 1, CalculateTensorSize(t));

  // Test unknown dimensions: a dimension of -1 is assumed to have size 1.
  EXPECT_EQ(
      DataTypeSize(DT_FLOAT) * 1 * 7 * 8 * 99,
      CalculateTensorSize(ShapeToTensorProperty({-1, 7, 8, 99}, DT_FLOAT)));
  EXPECT_EQ(
      DataTypeSize(DT_FLOAT) * 1 * 7 * 1 * 99,
      CalculateTensorSize(ShapeToTensorProperty({-1, 7, -1, 99}, DT_FLOAT)));
}

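// CalculateOutputSize looks up the size of the tensor on a given output port;
// port -1 denotes a control dependency (hard-coded 4 bytes) and out-of-range
// ports yield a size of zero.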
TEST(UtilsTest, CalculateOutputSize) {
  // Create a set of tensor properties.
  std::vector<OpInfo::TensorProperties> output = {
      ShapeToTensorProperty({4, 4}, DT_FLOAT),          // 0
      ShapeToTensorProperty({-1, 7, -1, 99}, DT_FLOAT)  // 1
  };

  // Test valid outputs.
  EXPECT_EQ(DataTypeSize(DT_FLOAT) * 4 * 4, CalculateOutputSize(output, 0));
  EXPECT_EQ(DataTypeSize(DT_FLOAT) * 1 * 7 * 1 * 99,
            CalculateOutputSize(output, 1));

  // port_num -1 is for a control dependency: hard-coded 4 bytes.
  EXPECT_EQ(4, CalculateOutputSize(output, -1));

  // An invalid port_num (arguably an error) yields an output size of zero.
  EXPECT_EQ(0, CalculateOutputSize(output, 2));
}

// Class for testing TensorSizeHistogram.
class TestTensorSizeHistogram : public TensorSizeHistogram {
 public:
  FRIEND_TEST(TensorSizeHistogramTest, Constructor);
  FRIEND_TEST(TensorSizeHistogramTest, Index);
  FRIEND_TEST(TensorSizeHistogramTest, Add);
  FRIEND_TEST(TensorSizeHistogramTest, Merge);
};

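// A default-constructed histogram should be empty: no elements, zero sum,
// zero max, a very large min, and all buckets at zero.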
TEST(TensorSizeHistogramTest, Constructor) {
  TestTensorSizeHistogram hist;
  EXPECT_EQ(0, hist.NumElem());
  EXPECT_EQ(0, hist.SumElem());
  EXPECT_LT(1000000000, hist.Min());  // Initially, min_ is a very large value.
  EXPECT_EQ(0, hist.Max());
  EXPECT_EQ(0.0, hist.Average());
  const auto& buckets = hist.GetBuckets();
  for (const auto& bucket : buckets) {
    EXPECT_EQ(0, bucket);
  }
}

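// Index() maps a tensor size to its bucket: size 0 goes to bucket 0, and a
// size in [2^(k-1), 2^k) goes to bucket k (e.g. 1025 falls in bucket 11).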
TEST(TensorSizeHistogramTest, Index) {
  TestTensorSizeHistogram hist;
  EXPECT_EQ(0, hist.Index(0));
  EXPECT_EQ(1, hist.Index(1));
  EXPECT_EQ(2, hist.Index(2));
  EXPECT_EQ(2, hist.Index(3));
  EXPECT_EQ(3, hist.Index(4));
  EXPECT_EQ(3, hist.Index(5));
  EXPECT_EQ(3, hist.Index(6));
  EXPECT_EQ(3, hist.Index(7));
  EXPECT_EQ(4, hist.Index(8));
  EXPECT_EQ(4, hist.Index(15));
  EXPECT_EQ(5, hist.Index(16));
  EXPECT_EQ(5, hist.Index(31));
  EXPECT_EQ(6, hist.Index(32));
  EXPECT_EQ(11, hist.Index(1025));
}

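// Adding elements should update the element count, sum, average, min/max,
// and the corresponding bucket (1037-1039 all land in bucket 11).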
TEST(TensorSizeHistogramTest, Add) {
  TestTensorSizeHistogram hist;
  hist.Add(1037);
  hist.Add(1038);
  hist.Add(1039);

  const auto& buckets = hist.GetBuckets();
  EXPECT_EQ(3, hist.NumElem());
  EXPECT_EQ(1037 + 1038 + 1039, hist.SumElem());
  EXPECT_DOUBLE_EQ(1038.0, hist.Average());
  EXPECT_EQ(1037, hist.Min());
  EXPECT_EQ(1039, hist.Max());
  EXPECT_EQ(3, buckets.at(11));
}

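// Merging another histogram should combine element counts, sums, min/max,
// and per-bucket totals.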
TEST(TensorSizeHistogramTest, Merge) {
  TestTensorSizeHistogram hist1;
  const auto& buckets = hist1.GetBuckets();
  hist1.Add(1037);
  hist1.Add(1038);
  hist1.Add(1039);

  TestTensorSizeHistogram hist2(hist1);
  hist1.Merge(hist2);
  EXPECT_EQ(6, hist1.NumElem());
  EXPECT_EQ(2 * (1037 + 1038 + 1039), hist1.SumElem());
  EXPECT_DOUBLE_EQ(1038.0, hist1.Average());
  EXPECT_EQ(1037, hist1.Min());
  EXPECT_EQ(1039, hist1.Max());
  EXPECT_EQ(6, buckets.at(11));

  TestTensorSizeHistogram hist3;
  hist3.Add(1);
  hist3.Add(2);
  hist3.Add(4);

  hist1.Merge(hist3);
  EXPECT_EQ(9, hist1.NumElem());
  EXPECT_EQ(2 * (1037 + 1038 + 1039) + 1 + 2 + 4, hist1.SumElem());
  EXPECT_DOUBLE_EQ((2 * (1037 + 1038 + 1039) + 1 + 2 + 4) / 9.0,
                   hist1.Average());
  EXPECT_EQ(1, hist1.Min());
  EXPECT_EQ(1039, hist1.Max());
  EXPECT_EQ(1, buckets.at(1));
  EXPECT_EQ(1, buckets.at(2));
  EXPECT_EQ(1, buckets.at(3));
  EXPECT_EQ(6, buckets.at(11));
}

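// Channel device names of the form "Channel_from_<src>_to_<dst>" should be
// condensed to "Channel: /<job>/<device type> -> /<job>/<device type>".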
TEST(DeviceClassTest, GetDeviceClass) {
  EXPECT_EQ(
      "Channel: /ps/CPU -> /worker/GPU",
      GetDeviceClass("Channel_from_/job_ps/replica_0/task_0/device_CPU_0_to_"
                     "/job_worker/replica_7/task_0/device_GPU_7"));
  EXPECT_EQ(
      "Channel: /worker_train/CPU -> /ps/GPU",
      GetDeviceClass(
          "Channel_from_/job_worker_train/replica_0/task_0/device_CPU_0_to_"
          "/job_ps/replica_7/task_0/device_GPU_7"));
}

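// Regular device names should be reduced to "/<job>/<device type>", with
// unrecognized names reported as "Unclassified".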
TEST(DeviceClassTest, GetDeviceClassForNonChannelDevice) {
  EXPECT_EQ("Unclassified",
            GetDeviceClassForNonChannelDevice("SOMETHING_WEIRD_DEVICE_NAME"));
  EXPECT_EQ("/worker/GPU", GetDeviceClassForNonChannelDevice(
                               "/job:worker/replica:0/task:0/device:GPU:0"));
  EXPECT_EQ("/worker/CPU", GetDeviceClassForNonChannelDevice(
                               "/job:worker/replica:0/task:0/device:CPU:0"));
  EXPECT_EQ("/worker_train/CPU", GetDeviceClassForNonChannelDevice(
                                     "/job:worker_train/replica:7/CPU:0"));
  EXPECT_EQ("//GPU", GetDeviceClassForNonChannelDevice("/device:GPU:7"));
}

}  // namespace

}  // end namespace grappler
}  // end namespace tensorflow