/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/core/common_runtime/gradients.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

namespace {
constexpr const float RESIZE_VAL_TOLERANCE = 1.0e-8;

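// Builds a [batch, height, width, channels] tensor whose i-th element is
// FloatToQuantized<T>(i / ratio) within the [min, max] range.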
template <typename T>
Tensor BuildTensor(const int batch_size, const int height, const int width,
                   const int channels, const float ratio, const float min,
                   const float max) {
  Tensor tensor(DataTypeToEnum<T>::value,
                TensorShape({batch_size, height, width, channels}));
  for (int64_t i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<T>()(i) =
        FloatToQuantized<T>(static_cast<float>(i) / ratio, min, max);
  }
  return tensor;
}

template <>
Tensor BuildTensor<float>(const int batch_size, const int height,
                          const int width, const int channels,
                          const float ratio, const float min, const float max) {
  Tensor tensor(DT_FLOAT, TensorShape({batch_size, height, width, channels}));
  for (int64_t i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<float>()(i) = static_cast<float>(i) / ratio;
  }
  return tensor;
}

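// Mirrors the scale computation used by the resize kernels: with
// align_corners the first and last output samples map exactly onto the input
// endpoints; otherwise the plain in/out size ratio is used.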
float CalculateResizeScale(int64_t in_size, int64_t out_size,
                           bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}

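// For one output coordinate, returns the lower and upper source offsets
// (already multiplied by `step`) and the fractional interpolation weight
// between them.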
inline std::tuple<int64_t, int64_t, float> GetReferenceWeight(
    const bool half_pixel_centers, const int64_t out_size,
    const int64_t in_size, const int step, const int index, const float scale) {
  const float in = half_pixel_centers
                       ? (static_cast<float>(index) + 0.5f) * scale - 0.5f
                       : index * scale;
  const float in_f = std::floor(in);
  const int64_t lower =
      std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0));
  const int64_t upper =
      std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1);
  return std::make_tuple(lower * step, upper * step, in - in_f);
}

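// Dequantizes the four corner values, bilinearly interpolates in float space,
// and re-quantizes the result; the float specialization below interpolates
// directly.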
template <typename T>
T ComputeLerpReference(const T in_top_left, const T in_top_right,
                       const T in_bottom_left, const T in_bottom_right,
                       const float x_lerp, const float y_lerp, const float min,
                       const float max) {
  const float top_left = QuantizedToFloat<T>(in_top_left, min, max);
  const float top_right = QuantizedToFloat<T>(in_top_right, min, max);
  const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max);
  const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max);
  const float top = top_left + (top_right - top_left) * x_lerp;
  const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
  const float out = top + (bottom - top) * y_lerp;
  return FloatToQuantized<T>(out, min, max);
}

template <>
float ComputeLerpReference<float>(const float in_top_left,
                                  const float in_top_right,
                                  const float in_bottom_left,
                                  const float in_bottom_right,
                                  const float x_lerp, const float y_lerp,
                                  const float min, const float max) {
  const float top = in_top_left + (in_top_right - in_top_left) * x_lerp;
  const float bottom =
      in_bottom_left + (in_bottom_right - in_bottom_left) * x_lerp;
  return top + (bottom - top) * y_lerp;
}

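// Computes the reference (expected) value of output pixel (b, y, x, c) by
// locating its four neighboring input pixels and interpolating them with
// ComputeLerpReference.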
template <typename T>
T CalcReferenceResizedVal(const T* image_data, const bool half_pixel_centers,
                          const int batch_size, const int64_t in_height,
                          const int64_t in_width, const int64_t out_height,
                          const int64_t out_width, const int channels,
                          const float height_scale, const float width_scale,
                          const float min, const float max, const int b,
                          const int64_t x, const int64_t y, const int c) {
  const std::tuple<int64_t, int64_t, float> x_weight = GetReferenceWeight(
      half_pixel_centers, out_width, in_width, channels, x, width_scale);
  const std::tuple<int64_t, int64_t, float> y_weight = GetReferenceWeight(
      half_pixel_centers, out_height, in_height, 1, y, height_scale);

  const int64_t in_row_size = in_width * channels;
  const int64_t in_batch_num_values = in_height * in_row_size;

  const int y_lower_index =
      b * in_batch_num_values + std::get<0>(y_weight) * in_row_size;
  const int y_upper_index =
      b * in_batch_num_values + std::get<1>(y_weight) * in_row_size;

  const int64_t xs_lower = std::get<0>(x_weight);
  const int64_t xs_upper = std::get<1>(x_weight);
  const float xs_lerp = std::get<2>(x_weight);
  const float ys_lerp = std::get<2>(y_weight);
  const float top_left = image_data[y_lower_index + xs_lower + c];
  const float top_right = image_data[y_lower_index + xs_upper + c];
  const float bottom_left = image_data[y_upper_index + xs_lower + c];
  const float bottom_right = image_data[y_upper_index + xs_upper + c];
  const float val =
      ComputeLerpReference<T>(top_left, top_right, bottom_left, bottom_right,
                              xs_lerp, ys_lerp, min, max);
  return val;
}

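// Compares every element of the kernel output against the reference
// implementation. With relative == false the check is an absolute tolerance
// on the raw quantized values; otherwise it is a relative tolerance on the
// dequantized float values.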
template <typename T>
void CheckTensorValue(const T* in_data, const T* out_data, const int batch_size,
                      const int64_t in_height, const int64_t in_width,
                      const int64_t out_height, const int64_t out_width,
                      const int channels, const bool align_corners,
                      const bool half_pixel_centers, const float min,
                      const float max, const float tolerance,
                      const bool relative) {
  const int64_t out_row_size = out_width * channels;
  const float height_scale =
      CalculateResizeScale(in_height, out_height, align_corners);
  const float width_scale =
      CalculateResizeScale(in_width, out_width, align_corners);

  for (int b = 0; b < batch_size; ++b) {
    for (int64_t y = 0; y < out_height; ++y) {
      for (int64_t x = 0; x < out_width; ++x) {
        for (int c = 0; c < channels; ++c) {
          const T ref_qval = CalcReferenceResizedVal<T>(
              in_data, half_pixel_centers, batch_size, in_height, in_width,
              out_height, out_width, channels, height_scale, width_scale, min,
              max, b, x, y, c);
          const T qval =
              out_data[(b * out_height + y) * out_row_size + x * channels + c];
          const float ref_val = QuantizedToFloat<T>(ref_qval, min, max);
          const float val = QuantizedToFloat<T>(qval, min, max);
          if (!relative) {
            const int q_tolerance = std::round(tolerance);
            EXPECT_TRUE(std::abs(static_cast<int32>(ref_qval) -
                                 static_cast<int32>(qval)) <= q_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", qval = " << qval
                << ", ref qval = " << ref_qval << ", " << q_tolerance;
          } else {
            const float rel_tolerance = std::max(ref_val, 1.0f) * tolerance;
            EXPECT_NEAR(ref_val, val, rel_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", ref qval = " << qval;
          }
        }
      }
    }
  }
}

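// Builds a small graph around a QuantizedResizeBilinear op, feeds it
// image_tensor, runs it `iterations` times, and returns the three outputs
// (resized image, out_min, out_max). Optionally logs simple timing stats.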
void TestResizeBilinear(const Tensor& image_tensor, const DataType dt,
                        const Input::Initializer& new_size,
                        const bool show_time, const int64_t iterations,
                        const float min, const float max,
                        const bool half_pixel_centers,
                        std::vector<Tensor>* outputs) {
  Scope root = Scope::NewRootScope();

  Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), dt);
  Output size = ops::Const<int32>(root.WithOpName("size"), new_size);
  Output in_min = ops::Const<float>(root.WithOpName("min"), min);
  Output in_max = ops::Const<float>(root.WithOpName("max"), max);

  ops::QuantizedResizeBilinear qrb = ops::QuantizedResizeBilinear(
      root.WithOpName("qrb"), placeholder, size, in_min, in_max,
      ops::QuantizedResizeBilinear::HalfPixelCenters(half_pixel_centers));

  TF_EXPECT_OK(root.status());

  ClientSession session(root);

  int64_t total_duration = 0;
  outputs->clear();

  for (int i = 0; i < iterations; ++i) {
    const int64_t start_time = Env::Default()->NowMicros();
    TF_EXPECT_OK(session.Run({{placeholder, image_tensor}},
                             {qrb.resized_images, qrb.out_min, qrb.out_max},
                             outputs));
    const int64_t end_time = Env::Default()->NowMicros();
    total_duration += end_time - start_time;
  }
  const int64_t one_run_duration = total_duration / iterations;

  const int64_t num_ops = outputs->at(0).NumElements();

  const double million_ops_per_second =
      (iterations * num_ops) / static_cast<double>(total_duration);

  if (show_time) {
    LOG(INFO) << "Time resize bilinear: "
              << TensorShape(image_tensor.shape()).DebugString()
              << ": iterations=" << iterations
              << ", MOps/s=" << million_ops_per_second
              << ", one_run_duration=" << one_run_duration
              << ", total_duration=" << total_duration;
  }
}

}  // namespace

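// Resizes a 1 x 1 x 128 x 1 qint32 image to width 256 and checks that each
// output element either reproduces a source pixel or equals the average of
// its two neighbors, then cross-checks against the reference implementation.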
void TestResizeBilinearOneDim() {
  constexpr float TOLERANCE = 1.0e-5;
  constexpr int IN_WIDTH = 128;
  constexpr int OUT_WIDTH = 256;
  constexpr float MIN = 0.0f;
  constexpr float MAX = 256.0f;
  constexpr float SCALE = static_cast<float>(IN_WIDTH) / OUT_WIDTH;
  Tensor image_quantized_tensor(DT_QINT32, TensorShape({1, 1, IN_WIDTH, 1}));

  for (int64_t i = 0; i < image_quantized_tensor.NumElements(); ++i) {
    image_quantized_tensor.flat<qint32>()(i) =
        FloatToQuantized<qint32>(static_cast<float>(i), MIN, MAX);
  }

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DT_QINT32, {1, OUT_WIDTH}, false,
                     1, MIN, MAX, false, &outputs);
  ASSERT_EQ(3, outputs.size());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).NumElements());
  ASSERT_EQ(4, outputs.at(0).shape().dims());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).shape().dim_size(2));

  // Manual value testing
  for (int64_t i = 0; i < outputs.at(0).NumElements(); ++i) {
    const float resized_image_val =
        QuantizedToFloat<qint32>(outputs.at(0).flat<qint32>()(i), MIN, MAX);
    float expected_val = 0.0f;
    if (i == 0 || i == outputs.at(0).NumElements() - 1 || i % 2 == 0) {
      expected_val = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
    } else {
      const float image_val0 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
      const float image_val1 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2 + 1), MIN, MAX);
      expected_val = (image_val0 + image_val1) * SCALE;
    }
    VLOG(1) << "(" << i << ") " << expected_val << ", " << resized_image_val;
    EXPECT_NEAR(expected_val, resized_image_val, RESIZE_VAL_TOLERANCE)
        << expected_val << ", " << resized_image_val;
  }

  // Value testing with reference implementation
  CheckTensorValue<qint32>(image_quantized_tensor.flat<qint32>().data(),
                           outputs.at(0).flat<qint32>().data(),
                           /*batch_size=*/1,
                           /*in_height=*/IN_WIDTH,
                           /*in_width=*/1,
                           /*out_height=*/OUT_WIDTH,
                           /*out_width=*/1,
                           /*channels=*/1,
                           /*align_corners=*/false,
                           /*half_pixel_centers=*/false, MIN, MAX, TOLERANCE,
                           true);
}

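// Resizes a synthetic image of the given shape and verifies the result
// against the reference implementation.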
template <typename T>
void RunTestResizeBilinearTwoDims(int batch_size, int in_height, int in_width,
                                  int out_height, int out_width, int channels,
                                  float tolerance, bool relative,
                                  const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, false, 1, min, max,
                     half_pixel_centers, &outputs);
  CheckTensorValue<T>(
      image_quantized_tensor.flat<T>().data(), outputs.at(0).flat<T>().data(),
      batch_size, in_height, in_width, out_height, out_width, channels,
      /*align_corners=*/false,
      /*half_pixel_centers=*/half_pixel_centers, min, max, tolerance, relative);
}

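// Benchmark-only variant: runs the op `iteration` times with timing logged
// and skips value verification.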
template <typename T>
void RunBenchmarkResizeBilinearTwoDims(int batch_size, int in_height,
                                       int in_width, int out_height,
                                       int out_width, int channels,
                                       int iteration,
                                       const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, true, iteration, min, max, false,
                     &outputs);
}

template <typename T>
void TestResizeBilinearTwoDimsType(const float tolerance, const bool relative,
                                   const bool half_pixel_centers) {
  RunTestResizeBilinearTwoDims<T>(1, 1, 1, 1, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 16, 1, 32, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 3, tolerance, relative,
                                  half_pixel_centers);
}

void TestResizeBilinearTwoDims() {
  for (const bool half_pixel_centers : {false, true}) {
    TestResizeBilinearTwoDimsType<quint8>(1.0f, false, half_pixel_centers);
    TestResizeBilinearTwoDimsType<qint32>(1.0e-5, true, half_pixel_centers);
    TestResizeBilinearTwoDimsType<float>(1.0e-5, true, half_pixel_centers);
  }
}

template <typename T>
void RunBenchmarkResizeBilinearTwoDimsType() {
  constexpr int ITER = 100;
  RunBenchmarkResizeBilinearTwoDims<T>(1, 1, 1, 2, 2, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 64, 64, 128, 128, 2, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 32, 32, 64, 64, 16, ITER, false);
}

void RunBenchmarkResizeBilinearTwoDims() {
  LOG(INFO) << "Benchmark quint8";
  RunBenchmarkResizeBilinearTwoDimsType<quint8>();
  LOG(INFO) << "Benchmark qint32";
  RunBenchmarkResizeBilinearTwoDimsType<qint32>();
  LOG(INFO) << "Benchmark float";
  RunBenchmarkResizeBilinearTwoDimsType<float>();
}

}  // namespace tensorflow

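// Registers each of the helper functions above as a googletest test case.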
#define RUN_TEST(t) \
  TEST(QuantizationResizeBilinearTest, t) { tensorflow::t(); }

RUN_TEST(TestResizeBilinearOneDim);
RUN_TEST(TestResizeBilinearTwoDims);

#if defined(__ANDROID__)

RUN_TEST(RunBenchmarkResizeBilinearTwoDims);

#endif  // __ANDROID__

int main(int argc, char** argv) {
  // On Linux, add: absl::SetFlag(&FLAGS_logtostderr, true);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}