/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/gradients.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

namespace {
constexpr const float RESIZE_VAL_TOLERANCE = 1.0e-8;

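// Builds a {batch, height, width, channels} tensor whose i-th element is
// i / ratio, quantized into the range [min, max] as type T.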
template <typename T>
Tensor BuildTensor(const int batch_size, const int height, const int width,
                   const int channels, const float ratio, const float min,
                   const float max) {
  Tensor tensor(DataTypeToEnum<T>::value,
                TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<T>()(i) =
        FloatToQuantized<T>(static_cast<float>(i) / ratio, min, max);
  }
  return tensor;
}

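// Float specialization: stores i / ratio directly, without quantization.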
template <>
Tensor BuildTensor<float>(const int batch_size, const int height,
                          const int width, const int channels,
                          const float ratio, const float min, const float max) {
  Tensor tensor(DT_FLOAT, TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<float>()(i) = static_cast<float>(i) / ratio;
  }
  return tensor;
}

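// Maps an output dimension back to the input dimension. With align_corners
// the first and last samples of input and output line up exactly.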
float CalculateResizeScale(int64 in_size, int64 out_size, bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}

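// For output coordinate `index`, returns the flattened offsets of the two
// neighbouring input samples (each multiplied by `step`) and the linear
// interpolation fraction between them.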
inline std::tuple<int64, int64, float> GetReferenceWeight(const int64 out_size,
                                                          const int64 in_size,
                                                          const int step,
                                                          const int index,
                                                          const float scale) {
  const float in = index * scale;
  const int64 lower = static_cast<int64>(in);
  const int64 upper = std::min(lower + 1, in_size - 1);
  return std::make_tuple(lower * step, upper * step, in - lower);
}

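// Reference bilinear interpolation: dequantizes the four corner values,
// interpolates in float, then requantizes the result into [min, max].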
template <typename T>
T ComputeLerpReference(const T in_top_left, const T in_top_right,
                       const T in_bottom_left, const T in_bottom_right,
                       const float x_lerp, const float y_lerp, const float min,
                       const float max) {
  const float top_left = QuantizedToFloat<T>(in_top_left, min, max);
  const float top_right = QuantizedToFloat<T>(in_top_right, min, max);
  const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max);
  const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max);
  const float top = top_left + (top_right - top_left) * x_lerp;
  const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
  const float out = top + (bottom - top) * y_lerp;
  return FloatToQuantized<T>(out, min, max);
}

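// Float specialization: interpolates directly; min and max are unused.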
template <>
float ComputeLerpReference<float>(const float in_top_left,
                                  const float in_top_right,
                                  const float in_bottom_left,
                                  const float in_bottom_right,
                                  const float x_lerp, const float y_lerp,
                                  const float min, const float max) {
  const float top = in_top_left + (in_top_right - in_top_left) * x_lerp;
  const float bottom =
      in_bottom_left + (in_bottom_right - in_bottom_left) * x_lerp;
  return top + (bottom - top) * y_lerp;
}

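// Computes the expected value of a single output pixel (b, y, x, c) using the
// reference interpolation above.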
template <typename T>
T CalcReferenceResizedVal(const T* image_data, const int batch_size,
                          const int64 in_height, const int64 in_width,
                          const int64 out_height, const int64 out_width,
                          const int channels, const float height_scale,
                          const float width_scale, const float min,
                          const float max, const int b, const int64 x,
                          const int64 y, const int c) {
  const std::tuple<int64, int64, float> x_weight =
      GetReferenceWeight(out_width, in_width, channels, x, width_scale);
  const std::tuple<int64, int64, float> y_weight =
      GetReferenceWeight(out_height, in_height, 1, y, height_scale);

  const int64 in_row_size = in_width * channels;
  const int64 in_batch_num_values = in_height * in_row_size;

  const int y_lower_index =
      b * in_batch_num_values + std::get<0>(y_weight) * in_row_size;
  const int y_upper_index =
      b * in_batch_num_values + std::get<1>(y_weight) * in_row_size;

  const int64 xs_lower = std::get<0>(x_weight);
  const int64 xs_upper = std::get<1>(x_weight);
  const float xs_lerp = std::get<2>(x_weight);
  const float ys_lerp = std::get<2>(y_weight);
  const float top_left = image_data[y_lower_index + xs_lower + c];
  const float top_right = image_data[y_lower_index + xs_upper + c];
  const float bottom_left = image_data[y_upper_index + xs_lower + c];
  const float bottom_right = image_data[y_upper_index + xs_upper + c];
  const float val =
      ComputeLerpReference<T>(top_left, top_right, bottom_left, bottom_right,
                              xs_lerp, ys_lerp, min, max);
  return val;
}

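// Compares every element of out_data against the reference implementation.
// With relative == false the tolerance is an absolute number of quantized
// steps; with relative == true it is relative to the dequantized reference
// value.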
template <typename T>
void CheckTensorValue(const T* in_data, const T* out_data, const int batch_size,
                      const int64 in_height, const int64 in_width,
                      const int64 out_height, const int64 out_width,
                      const int channels, const bool align_corners,
                      const float min, const float max, const float tolerance,
                      const bool relative) {
  const int64 out_row_size = out_width * channels;
  const float height_scale =
      CalculateResizeScale(in_height, out_height, align_corners);
  const float width_scale =
      CalculateResizeScale(in_width, out_width, align_corners);

  for (int b = 0; b < batch_size; ++b) {
    for (int64 y = 0; y < out_height; ++y) {
      for (int64 x = 0; x < out_width; ++x) {
        for (int c = 0; c < channels; ++c) {
          const T ref_qval = CalcReferenceResizedVal<T>(
              in_data, batch_size, in_height, in_width, out_height, out_width,
              channels, height_scale, width_scale, min, max, b, x, y, c);
          const T qval =
              out_data[(b * out_height + y) * out_row_size + x * channels + c];
          const float ref_val = QuantizedToFloat<T>(ref_qval, min, max);
          const float val = QuantizedToFloat<T>(qval, min, max);
          if (!relative) {
            const int q_tolerance = std::round(tolerance);
            EXPECT_TRUE(std::abs(static_cast<int32>(ref_qval) -
                                 static_cast<int32>(qval)) <= q_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", qval = " << qval
                << ", ref qval = " << ref_qval << ", " << q_tolerance;
          } else {
            const float rel_tolerance = std::max(ref_val, 1.0f) * tolerance;
            EXPECT_NEAR(ref_val, val, rel_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", ref qval = " << qval;
          }
        }
      }
    }
  }
}

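// Builds a graph containing a single QuantizedResizeBilinear op, runs it
// `iterations` times through a ClientSession, and returns the three outputs
// (resized image, out_min, out_max) in *outputs. Timing is logged when
// show_time is true.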
void TestResizeBilinear(const Tensor& image_tensor, const DataType dt,
                        const Input::Initializer& new_size,
                        const bool show_time, const int64 iterations,
                        const float min, const float max,
                        std::vector<Tensor>* outputs) {
  Scope root = Scope::NewRootScope();

  Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), dt);
  Output size = ops::Const<int32>(root.WithOpName("size"), new_size);
  Output in_min = ops::Const<float>(root.WithOpName("min"), min);
  Output in_max = ops::Const<float>(root.WithOpName("max"), max);

  ops::QuantizedResizeBilinear qrb = ops::QuantizedResizeBilinear(
      root.WithOpName("qrb"), placeholder, size, in_min, in_max);

  TF_EXPECT_OK(root.status());

  ClientSession session(root);

  int64 total_duration = 0;
  outputs->clear();

  for (int i = 0; i < iterations; ++i) {
    const int64 start_time = Env::Default()->NowMicros();
    TF_EXPECT_OK(session.Run({{placeholder, image_tensor}},
                             {qrb.resized_images, qrb.out_min, qrb.out_max},
                             outputs));
    const int64 end_time = Env::Default()->NowMicros();
    total_duration += end_time - start_time;
  }
  const int64 one_run_duration = total_duration / iterations;

  const int64 num_ops = outputs->at(0).NumElements();

  const double million_ops_per_second =
      (iterations * num_ops) / static_cast<double>(total_duration);

  if (show_time) {
    LOG(INFO) << "Time resize bilinear: "
              << TensorShape(image_tensor.shape()).DebugString()
              << ": iterations=" << iterations
              << ", MOps/s=" << million_ops_per_second
              << ", one_run_duration=" << one_run_duration
              << ", total_duration=" << total_duration;
  }
}

}  // namespace

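// Resizes a 1 x 1 x 128 x 1 qint32 ramp to width 256: even outputs (and the
// final output) should equal the corresponding input sample, and every other
// odd output should be the average of its two neighbours.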
void TestResizeBilinearOneDim() {
  constexpr float TOLERANCE = 1.0e-5;
  constexpr int IN_WIDTH = 128;
  constexpr int OUT_WIDTH = 256;
  constexpr float MIN = 0.0f;
  constexpr float MAX = 256.0f;
  constexpr float SCALE = static_cast<float>(IN_WIDTH) / OUT_WIDTH;
  Tensor image_quantized_tensor(DT_QINT32, TensorShape({1, 1, IN_WIDTH, 1}));

  for (int64 i = 0; i < image_quantized_tensor.NumElements(); ++i) {
    image_quantized_tensor.flat<qint32>()(i) =
        FloatToQuantized<qint32>(static_cast<float>(i), MIN, MAX);
  }

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DT_QINT32, {1, OUT_WIDTH}, false,
                     1, MIN, MAX, &outputs);
  ASSERT_EQ(3, outputs.size());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).NumElements());
  ASSERT_EQ(4, outputs.at(0).shape().dims());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).shape().dim_size(2));

  // Manual value testing
  for (int64 i = 0; i < outputs.at(0).NumElements(); ++i) {
    const float resized_image_val =
        QuantizedToFloat<qint32>(outputs.at(0).flat<qint32>()(i), MIN, MAX);
    float expected_val = 0.0f;
    if (i == 0 || i == outputs.at(0).NumElements() - 1 || i % 2 == 0) {
      expected_val = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
    } else {
      const float image_val0 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
      const float image_val1 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2 + 1), MIN, MAX);
      expected_val = (image_val0 + image_val1) * SCALE;
    }
    VLOG(1) << "(" << i << ") " << expected_val << ", " << resized_image_val;
    EXPECT_NEAR(expected_val, resized_image_val, RESIZE_VAL_TOLERANCE)
        << expected_val << ", " << resized_image_val;
  }

  // Value testing with reference implementation
  CheckTensorValue<qint32>(image_quantized_tensor.flat<qint32>().data(),
                           outputs.at(0).flat<qint32>().data(),
                           /*batch_size=*/1,
                           /*in_height=*/IN_WIDTH,
                           /*in_width=*/1,
                           /*out_height=*/OUT_WIDTH,
                           /*out_width=*/1,
                           /*channels=*/1,
                           /*align_corners=*/false, MIN, MAX, TOLERANCE, true);
}

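// Builds a quantized ramp image, resizes it with the op, and validates every
// output element against the reference implementation.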
template <typename T>
void RunTestResizeBilinearTwoDims(int batch_size, int in_height, int in_width,
                                  int out_height, int out_width, int channels,
                                  float tolerance, bool relative) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, false, 1, min, max, &outputs);
  CheckTensorValue<T>(image_quantized_tensor.flat<T>().data(),
                      outputs.at(0).flat<T>().data(), batch_size, in_height,
                      in_width, out_height, out_width, channels,
                      /*align_corners=*/false, min, max, tolerance, relative);
}

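// Times `iteration` runs of the op on a ramp image; results are logged, not
// validated.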
template <typename T>
void RunBenchmarkResizeBilinearTwoDims(int batch_size, int in_height,
                                       int in_width, int out_height,
                                       int out_width, int channels,
                                       int iteration) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, true, iteration, min, max,
                     &outputs);
}

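// Exercises a range of upscale and downscale shapes with 1, 2 and 3 channels
// for a single data type.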
template <typename T>
void TestResizeBilinearTwoDimsType(const float tolerance, const bool relative) {
  RunTestResizeBilinearTwoDims<T>(1, 1, 1, 1, 1, 1, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 1, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 1, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, tolerance,
                                  relative);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 1, tolerance,
                                  relative);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 2, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 2, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 2, tolerance,
                                  relative);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 2, tolerance,
                                  relative);
  RunTestResizeBilinearTwoDims<T>(1, 1, 16, 1, 32, 3, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 3, tolerance, relative);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, tolerance,
                                  relative);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 3, tolerance,
                                  relative);
}

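// Runs the two-dimensional tests: quint8 with an absolute tolerance of one
// quantized step, qint32 and float with a relative tolerance of 1e-5.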
void TestResizeBilinearTwoDims() {
  TestResizeBilinearTwoDimsType<quint8>(1.0f, false);
  TestResizeBilinearTwoDimsType<qint32>(1.0e-5, true);
  TestResizeBilinearTwoDimsType<float>(1.0e-5, true);
}

template <typename T>
void RunBenchmarkResizeBilinearTwoDimsType() {
  constexpr int ITER = 100;
  RunBenchmarkResizeBilinearTwoDims<T>(1, 1, 1, 2, 2, 1, ITER);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, ITER);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, ITER);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 64, 64, 128, 128, 2, ITER);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 32, 32, 64, 64, 16, ITER);
}

void RunBenchmarkResizeBilinearTwoDims() {
  LOG(INFO) << "Benchmark quint8";
  RunBenchmarkResizeBilinearTwoDimsType<quint8>();
  LOG(INFO) << "Benchmark qint32";
  RunBenchmarkResizeBilinearTwoDimsType<qint32>();
  LOG(INFO) << "Benchmark float";
  RunBenchmarkResizeBilinearTwoDimsType<float>();
}

}  // namespace tensorflow

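// Wraps each test helper above in a gtest TEST case.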
#define RUN_TEST(t) \
  TEST(QuantizationResizeBilinearTest, t) { tensorflow::t(); }

RUN_TEST(TestResizeBilinearOneDim);
RUN_TEST(TestResizeBilinearTwoDims);

#if defined(__ANDROID__)

RUN_TEST(RunBenchmarkResizeBilinearTwoDims);

#endif  // __ANDROID__

int main(int argc, char** argv) {
  // On Linux, add: FLAGS_logtostderr = true;
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}