// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include <xnnpack.h>

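// Builder-style test harness for the XNNPACK Resize Bilinear operator:
// parameters are configured through chainable setters, and each Test*()
// method computes a floating-point reference interpolation, runs the
// operator, and compares the two results.
//
// A minimal usage sketch (the sizes here are illustrative, not taken from an
// actual test case):
//
//   ResizeBilinearOperatorTester()
//     .input_size(5, 7)
//     .output_size(10, 14)
//     .channels(3)
//     .TestNHWCxF32();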
class ResizeBilinearOperatorTester {
 public:
  inline ResizeBilinearOperatorTester& input_size(size_t input_height, size_t input_width) {
    assert(input_height >= 1);
    assert(input_width >= 1);
    this->input_height_ = input_height;
    this->input_width_ = input_width;
    return *this;
  }

  inline ResizeBilinearOperatorTester& input_height(size_t input_height) {
    assert(input_height >= 1);
    this->input_height_ = input_height;
    return *this;
  }

  inline size_t input_height() const {
    return this->input_height_;
  }

  inline ResizeBilinearOperatorTester& input_width(size_t input_width) {
    assert(input_width >= 1);
    this->input_width_ = input_width;
    return *this;
  }

  inline size_t input_width() const {
    return this->input_width_;
  }

  inline ResizeBilinearOperatorTester& output_size(size_t output_height, size_t output_width) {
    assert(output_height >= 1);
    assert(output_width >= 1);
    this->output_height_ = output_height;
    this->output_width_ = output_width;
    return *this;
  }

  inline ResizeBilinearOperatorTester& output_height(size_t output_height) {
    assert(output_height >= 1);
    this->output_height_ = output_height;
    return *this;
  }

  inline size_t output_height() const {
    return this->output_height_;
  }

  inline ResizeBilinearOperatorTester& output_width(size_t output_width) {
    assert(output_width >= 1);
    this->output_width_ = output_width;
    return *this;
  }

  inline size_t output_width() const {
    return this->output_width_;
  }

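  // Effective scale factors used by the reference computation. With
  // align_corners, the corner pixels of the input and output grids coincide,
  // so scaling is over (size - 1) intervals; otherwise it is the plain ratio
  // of sizes.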
  inline float height_scale() const {
    if (align_corners() && output_height() > 1) {
      return float(input_height() - 1) / float(output_height() - 1);
    } else {
      return float(input_height()) / float(output_height());
    }
  }

  inline float width_scale() const {
    if (align_corners() && output_width() > 1) {
      return float(input_width() - 1) / float(output_width() - 1);
    } else {
      return float(input_width()) / float(output_width());
    }
  }

  inline ResizeBilinearOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline ResizeBilinearOperatorTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline ResizeBilinearOperatorTester& input_pixel_stride(size_t input_pixel_stride) {
    assert(input_pixel_stride != 0);
    this->input_pixel_stride_ = input_pixel_stride;
    return *this;
  }

  inline size_t input_pixel_stride() const {
    if (this->input_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->input_pixel_stride_ >= channels());
      return this->input_pixel_stride_;
    }
  }

  inline ResizeBilinearOperatorTester& output_pixel_stride(size_t output_pixel_stride) {
    assert(output_pixel_stride != 0);
    this->output_pixel_stride_ = output_pixel_stride;
    return *this;
  }

  inline size_t output_pixel_stride() const {
    if (this->output_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->output_pixel_stride_ >= channels());
      return this->output_pixel_stride_;
    }
  }

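  // The next_* parameters describe a second shape for re-running the
  // operator; the Test*() methods in this file do not currently exercise
  // them.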
  inline ResizeBilinearOperatorTester& next_input_size(uint32_t next_input_height, uint32_t next_input_width) {
    assert(next_input_height >= 1);
    assert(next_input_width >= 1);
    this->next_input_height_ = next_input_height;
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline ResizeBilinearOperatorTester& next_input_height(uint32_t next_input_height) {
    assert(next_input_height >= 1);
    this->next_input_height_ = next_input_height;
    return *this;
  }

  inline uint32_t next_input_height() const {
    if (this->next_input_height_ == 0) {
      return input_height();
    } else {
      return this->next_input_height_;
    }
  }

  inline ResizeBilinearOperatorTester& next_input_width(uint32_t next_input_width) {
    assert(next_input_width >= 1);
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline uint32_t next_input_width() const {
    if (this->next_input_width_ == 0) {
      return input_width();
    } else {
      return this->next_input_width_;
    }
  }

  inline ResizeBilinearOperatorTester& next_batch_size(size_t next_batch_size) {
    assert(next_batch_size >= 1);
    this->next_batch_size_ = next_batch_size;
    return *this;
  }

  inline size_t next_batch_size() const {
    if (this->next_batch_size_ == 0) {
      return batch_size();
    } else {
      return this->next_batch_size_;
    }
  }

  inline ResizeBilinearOperatorTester& align_corners(bool align_corners) {
    this->align_corners_ = align_corners;
    return *this;
  }

  inline bool align_corners() const {
    return this->align_corners_;
  }

  inline ResizeBilinearOperatorTester& tf_legacy_mode(bool tf_legacy_mode) {
    this->tf_legacy_mode_ = tf_legacy_mode;
    return *this;
  }

  inline bool tf_legacy_mode() const {
    return this->tf_legacy_mode_;
  }

  inline ResizeBilinearOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

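  // Tests the F32 NHWC operator against a floating-point reference, using a
  // per-element tolerance of 1.0e-5 relative to the reference value.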
  void TestNHWCxF32() const {
    if (align_corners()) {
      ASSERT_FALSE(tf_legacy_mode());
    }

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results.
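      // With half-pixel centers (the default), sample coordinates are offset
      // by 0.5 before scaling; align_corners and TensorFlow legacy mode use
      // unshifted coordinates.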
      const float offset = (tf_legacy_mode() || align_corners()) ? 0.0f : 0.5f;
      for (size_t batch_index = 0; batch_index < batch_size(); batch_index++) {
        for (size_t output_y = 0; output_y < output_height(); output_y++) {
          const float input_y = (float(output_y) + offset) * height_scale() - offset;
          const int64_t input_y_top = std::max<int64_t>(int64_t(std::floor(input_y)), 0);
          const int64_t input_y_bottom = std::min<int64_t>(int64_t(std::ceil(input_y)), input_height() - 1);
          const float y_alpha = input_y - std::floor(input_y);
          for (size_t output_x = 0; output_x < output_width(); output_x++) {
            const float input_x = (float(output_x) + offset) * width_scale() - offset;
            const int64_t input_x_left = std::max<int64_t>(int64_t(std::floor(input_x)), 0);
            const int64_t input_x_right = std::min<int64_t>(int64_t(std::ceil(input_x)), input_width() - 1);
            const float x_alpha = input_x - std::floor(input_x);
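            // Blend the four neighboring input pixels with weights derived
            // from the fractional part of the sample coordinates.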
            for (size_t c = 0; c < channels(); c++) {
              output_ref[((batch_index * output_height() + output_y) * output_width() + output_x) * channels() + c] =
                input[((batch_index * input_height() + input_y_top) * input_width() + input_x_left) * input_pixel_stride() + c] * (1.0f - y_alpha) * (1.0f - x_alpha) +
                input[((batch_index * input_height() + input_y_top) * input_width() + input_x_right) * input_pixel_stride() + c] * (1.0f - y_alpha) * x_alpha +
                input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_left) * input_pixel_stride() + c] * y_alpha * (1.0f - x_alpha) +
                input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_right) * input_pixel_stride() + c] * y_alpha * x_alpha;
            }
          }
        }
      }

      // Create, setup, run, and destroy Resize Bilinear operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t resize_bilinear_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_resize_bilinear2d_nhwc_f32(
          channels(), input_pixel_stride(), output_pixel_stride(),
          (align_corners() ? XNN_FLAG_ALIGN_CORNERS : 0) | (tf_legacy_mode() ? XNN_FLAG_TENSORFLOW_LEGACY_MODE : 0),
          &resize_bilinear_op));
      ASSERT_NE(nullptr, resize_bilinear_op);

      // Smart pointer to automatically delete resize_bilinear_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_resize_bilinear_op(resize_bilinear_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_resize_bilinear2d_nhwc_f32(
          resize_bilinear_op,
          batch_size(), input_height(), input_width(),
          output_height(), output_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(resize_bilinear_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_NEAR(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c],
                  output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  std::abs(output_ref[((i * output_height() + y) * output_width() + x) * channels() + c]) * 1.0e-5f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

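  // Tests the signed 8-bit NHWC operator: the reference is computed in float
  // and compared with an absolute tolerance of 0.6, i.e. just over half a
  // quantization step of rounding slack.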
  void TestNHWCxS8() const {
    if (align_corners()) {
      ASSERT_FALSE(tf_legacy_mode());
    }

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto i8rng = std::bind(std::uniform_int_distribution<int32_t>(
      std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), std::ref(rng));

    std::vector<int8_t> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(int8_t));
    std::vector<int8_t> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(i8rng));
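      // Prime the output buffer with a canary value so elements the operator
      // fails to overwrite are detectable.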
      std::fill(output.begin(), output.end(), INT8_C(0xA5));

      // Compute reference results.
      const float offset = (tf_legacy_mode() || align_corners()) ? 0.0f : 0.5f;
      for (size_t batch_index = 0; batch_index < batch_size(); batch_index++) {
        for (size_t output_y = 0; output_y < output_height(); output_y++) {
          const float input_y = (float(output_y) + offset) * height_scale() - offset;
          const int64_t input_y_top = std::max<int64_t>(int64_t(std::floor(input_y)), 0);
          const int64_t input_y_bottom = std::min<int64_t>(int64_t(std::ceil(input_y)), input_height() - 1);
          const float y_alpha = input_y - std::floor(input_y);
          for (size_t output_x = 0; output_x < output_width(); output_x++) {
            const float input_x = (float(output_x) + offset) * width_scale() - offset;
            const int64_t input_x_left = std::max<int64_t>(int64_t(std::floor(input_x)), 0);
            const int64_t input_x_right = std::min<int64_t>(int64_t(std::ceil(input_x)), input_width() - 1);
            const float x_alpha = input_x - std::floor(input_x);
            for (size_t c = 0; c < channels(); c++) {
              output_ref[((batch_index * output_height() + output_y) * output_width() + output_x) * channels() + c] =
                float(int32_t(input[((batch_index * input_height() + input_y_top) * input_width() + input_x_left) * input_pixel_stride() + c])) * (1.0f - y_alpha) * (1.0f - x_alpha) +
                float(int32_t(input[((batch_index * input_height() + input_y_top) * input_width() + input_x_right) * input_pixel_stride() + c])) * (1.0f - y_alpha) * x_alpha +
                float(int32_t(input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_left) * input_pixel_stride() + c])) * y_alpha * (1.0f - x_alpha) +
                float(int32_t(input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_right) * input_pixel_stride() + c])) * y_alpha * x_alpha;
            }
          }
        }
      }

      // Create, setup, run, and destroy Resize Bilinear operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t resize_bilinear_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_resize_bilinear2d_nhwc_s8(
          channels(), input_pixel_stride(), output_pixel_stride(),
          (align_corners() ? XNN_FLAG_ALIGN_CORNERS : 0) | (tf_legacy_mode() ? XNN_FLAG_TENSORFLOW_LEGACY_MODE : 0),
          &resize_bilinear_op));
      ASSERT_NE(nullptr, resize_bilinear_op);

      // Smart pointer to automatically delete resize_bilinear_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_resize_bilinear_op(resize_bilinear_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_resize_bilinear2d_nhwc_s8(
          resize_bilinear_op,
          batch_size(), input_height(), input_width(),
          output_height(), output_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(resize_bilinear_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_NEAR(
                  float(int32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c])),
                  output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  0.6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

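  // Tests the unsigned 8-bit NHWC operator, mirroring the S8 case above.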
  void TestNHWCxU8() const {
    if (align_corners()) {
      ASSERT_FALSE(tf_legacy_mode());
    }

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(
      std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), std::ref(rng));

    std::vector<uint8_t> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), UINT8_C(0xA5));

      // Compute reference results.
      const float offset = (tf_legacy_mode() || align_corners()) ? 0.0f : 0.5f;
      for (size_t batch_index = 0; batch_index < batch_size(); batch_index++) {
        for (size_t output_y = 0; output_y < output_height(); output_y++) {
          const float input_y = (float(output_y) + offset) * height_scale() - offset;
          const int64_t input_y_top = std::max<int64_t>(int64_t(std::floor(input_y)), 0);
          const int64_t input_y_bottom = std::min<int64_t>(int64_t(std::ceil(input_y)), input_height() - 1);
          const float y_alpha = input_y - std::floor(input_y);
          for (size_t output_x = 0; output_x < output_width(); output_x++) {
            const float input_x = (float(output_x) + offset) * width_scale() - offset;
            const int64_t input_x_left = std::max<int64_t>(int64_t(std::floor(input_x)), 0);
            const int64_t input_x_right = std::min<int64_t>(int64_t(std::ceil(input_x)), input_width() - 1);
            const float x_alpha = input_x - std::floor(input_x);
            for (size_t c = 0; c < channels(); c++) {
              output_ref[((batch_index * output_height() + output_y) * output_width() + output_x) * channels() + c] =
                float(int32_t(input[((batch_index * input_height() + input_y_top) * input_width() + input_x_left) * input_pixel_stride() + c])) * (1.0f - y_alpha) * (1.0f - x_alpha) +
                float(int32_t(input[((batch_index * input_height() + input_y_top) * input_width() + input_x_right) * input_pixel_stride() + c])) * (1.0f - y_alpha) * x_alpha +
                float(int32_t(input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_left) * input_pixel_stride() + c])) * y_alpha * (1.0f - x_alpha) +
                float(int32_t(input[((batch_index * input_height() + input_y_bottom) * input_width() + input_x_right) * input_pixel_stride() + c])) * y_alpha * x_alpha;
            }
          }
        }
      }

      // Create, setup, run, and destroy Resize Bilinear operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t resize_bilinear_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_resize_bilinear2d_nhwc_u8(
          channels(), input_pixel_stride(), output_pixel_stride(),
          (align_corners() ? XNN_FLAG_ALIGN_CORNERS : 0) | (tf_legacy_mode() ? XNN_FLAG_TENSORFLOW_LEGACY_MODE : 0),
          &resize_bilinear_op));
      ASSERT_NE(nullptr, resize_bilinear_op);

      // Smart pointer to automatically delete resize_bilinear_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_resize_bilinear_op(resize_bilinear_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_resize_bilinear2d_nhwc_u8(
          resize_bilinear_op,
          batch_size(), input_height(), input_width(),
          output_height(), output_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(resize_bilinear_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_NEAR(
                  float(int32_t(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c])),
                  output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  0.6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

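  // Tests the F32 NCHW operator; indexing differs from the NHWC tests because
  // each channel is stored as a separate image plane.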
  void TestNCHWxF32() const {
    if (align_corners()) {
      ASSERT_FALSE(tf_legacy_mode());
    }

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results.
      const float offset = (tf_legacy_mode() || align_corners()) ? 0.0f : 0.5f;
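      // Per-image strides for NCHW indexing: each channel occupies a
      // contiguous plane of height * width pixels.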
      const int64_t input_num_pixels = input_height() * input_width();
      const int64_t input_num_elements = input_num_pixels * input_pixel_stride();
      const int64_t output_num_pixels = output_height() * output_width();
      const int64_t output_num_elements = output_num_pixels * channels();
      for (size_t batch_index = 0; batch_index < batch_size(); batch_index++) {
        for (size_t output_y = 0; output_y < output_height(); output_y++) {
          const float input_y = (float(output_y) + offset) * height_scale() - offset;
          const int64_t input_y_top = std::max<int64_t>(int64_t(std::floor(input_y)), 0);
          const int64_t input_y_bottom = std::min<int64_t>(int64_t(std::ceil(input_y)), input_height() - 1);
          const float y_alpha = input_y - std::floor(input_y);
          for (size_t output_x = 0; output_x < output_width(); output_x++) {
            const float input_x = (float(output_x) + offset) * width_scale() - offset;
            const int64_t input_x_left = std::max<int64_t>(int64_t(std::floor(input_x)), 0);
            const int64_t input_x_right = std::min<int64_t>(int64_t(std::ceil(input_x)), input_width() - 1);
            const float x_alpha = input_x - std::floor(input_x);
            for (size_t c = 0; c < channels(); c++) {
              output_ref[batch_index * output_num_elements + c * output_num_pixels + output_y * output_width() + output_x] =
                input[batch_index * input_num_elements + c * input_num_pixels + input_y_top * input_width() + input_x_left] * (1.0f - y_alpha) * (1.0f - x_alpha) +
                input[batch_index * input_num_elements + c * input_num_pixels + input_y_top * input_width() + input_x_right] * (1.0f - y_alpha) * x_alpha +
                input[batch_index * input_num_elements + c * input_num_pixels + input_y_bottom * input_width() + input_x_left] * y_alpha * (1.0f - x_alpha) +
                input[batch_index * input_num_elements + c * input_num_pixels + input_y_bottom * input_width() + input_x_right] * y_alpha * x_alpha;
            }
          }
        }
      }

      // Create, setup, run, and destroy Resize Bilinear operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t resize_bilinear_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_resize_bilinear2d_nchw_f32(
          channels(), input_pixel_stride(), output_pixel_stride(),
          (align_corners() ? XNN_FLAG_ALIGN_CORNERS : 0) | (tf_legacy_mode() ? XNN_FLAG_TENSORFLOW_LEGACY_MODE : 0),
          &resize_bilinear_op));
      ASSERT_NE(nullptr, resize_bilinear_op);

      // Smart pointer to automatically delete resize_bilinear_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_resize_bilinear_op(resize_bilinear_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_resize_bilinear2d_nchw_f32(
          resize_bilinear_op,
          batch_size(), input_height(), input_width(),
          output_height(), output_width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(resize_bilinear_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_NEAR(output[i * output_num_elements + c * output_num_pixels + y * output_width() + x],
                  output_ref[i * output_num_elements + c * output_num_pixels + y * output_width() + x],
                  1.0e-6f) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

 private:
  size_t input_height_{1};
  size_t input_width_{1};
  size_t output_height_{1};
  size_t output_width_{1};
  size_t channels_{1};
  size_t batch_size_{1};
  size_t input_pixel_stride_{0};
  size_t output_pixel_stride_{0};
  size_t next_input_height_{0};
  size_t next_input_width_{0};
  size_t next_batch_size_{0};
  bool align_corners_{false};
  bool tf_legacy_mode_{false};
  size_t iterations_{1};
};