/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/pool_2d_tester.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

Test(tflite::BuiltinOperator pool_op,TfLiteDelegate * delegate) const36 void Pool2DTester::Test(tflite::BuiltinOperator pool_op,
37                         TfLiteDelegate* delegate) const {
38   std::random_device random_device;
39   auto rng = std::mt19937(random_device());
40   auto range_rng = std::bind(
41       std::uniform_real_distribution<float>(-25.0f, 25.0f), std::ref(rng));
42 
43   std::vector<char> buffer = CreateTfLiteModel(pool_op);
44   const tflite::Model* model = tflite::GetModel(buffer.data());
45 
46   std::unique_ptr<tflite::Interpreter> delegate_interpreter;
47   ASSERT_EQ(
48       tflite::InterpreterBuilder(
49           model,
50           tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
51           &delegate_interpreter),
52       kTfLiteOk);
53   std::unique_ptr<tflite::Interpreter> default_interpreter;
54   ASSERT_EQ(
55       tflite::InterpreterBuilder(
56           model,
57           tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
58           &default_interpreter),
59       kTfLiteOk);
60 
61   ASSERT_TRUE(delegate_interpreter);
62   ASSERT_TRUE(default_interpreter);
63 
64   ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
65   ASSERT_EQ(default_interpreter->inputs().size(), 1);
66 
67   ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
68   ASSERT_EQ(default_interpreter->outputs().size(), 1);
69 
70   ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
71   ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
72 
73   ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
74 
75   float* default_input_data = default_interpreter->typed_tensor<float>(
76       default_interpreter->inputs()[0]);
77   for (int32_t i = 0; i < BatchSize(); i++) {
78     for (int32_t c = 0; c < Channels(); c++) {
79       // Use the same range of all-positive or all-negative values to generate
80       // all pixels within the same batch index & channel, but different ranges
81       // for different channels or batches. This ensures that no catastrophic
82       // cancellation occur, but test covers both positive and negative inputs.
83       const float range = range_rng();
84       auto value_rng =
85           std::bind(std::uniform_real_distribution<float>(
86                         std::min(range, 0.0f), std::max(range, 0.0f)),
87                     std::ref(rng));
88       for (int32_t y = 0; y < InputHeight(); y++) {
89         for (int32_t x = 0; x < InputWidth(); x++) {
90           const int32_t index =
91               ((i * InputHeight() + y) * InputWidth() + x) * Channels() + c;
92           default_input_data[index] = value_rng();
93         }
94       }
95     }
96   }
97 
98   float* xnnpack_input_data = delegate_interpreter->typed_tensor<float>(
99       delegate_interpreter->inputs()[0]);
100   std::copy(default_input_data,
101             default_input_data +
102                 BatchSize() * InputHeight() * InputWidth() * Channels(),
103             xnnpack_input_data);
104 
105   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
106   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
107 
108   float* default_output_data = default_interpreter->typed_tensor<float>(
109       default_interpreter->outputs()[0]);
110   float* xnnpack_output_data = delegate_interpreter->typed_tensor<float>(
111       delegate_interpreter->outputs()[0]);
112 
113   for (int32_t i = 0; i < BatchSize(); i++) {
114     for (int32_t y = 0; y < OutputHeight(); y++) {
115       for (int32_t x = 0; x < OutputWidth(); x++) {
116         for (int32_t c = 0; c < Channels(); c++) {
117           const int32_t index =
118               ((i * OutputHeight() + y) * OutputWidth() + x) * Channels() + c;
119           if (pool_op == BuiltinOperator_MAX_POOL_2D) {
120             // MaxPooling results must be exact
121             ASSERT_EQ(default_output_data[index], xnnpack_output_data[index])
122                 << "batch " << i << " / " << BatchSize() << ", y position " << y
123                 << " / " << OutputHeight() << ", x position " << x << " / "
124                 << OutputWidth() << ", channel " << c << " / " << Channels();
125           } else {
126             ASSERT_NEAR(default_output_data[index], xnnpack_output_data[index],
127                         std::abs(default_output_data[index]) * 3.0e-6f)
128                 << "batch " << i << " / " << BatchSize() << ", y position " << y
129                 << " / " << OutputHeight() << ", x position " << x << " / "
130                 << OutputWidth() << ", channel " << c << " / " << Channels();
131           }
132         }
133       }
134     }
135   }
136 }
CreateTfLiteModel(tflite::BuiltinOperator pool_op) const138 std::vector<char> Pool2DTester::CreateTfLiteModel(
139     tflite::BuiltinOperator pool_op) const {
140   flatbuffers::FlatBufferBuilder builder;
141   flatbuffers::Offset<tflite::OperatorCode> operator_code =
142       CreateOperatorCode(builder, pool_op, 0);
143 
144   flatbuffers::Offset<tflite::Pool2DOptions> pool_2d_options =
145       CreatePool2DOptions(builder, Padding(), StrideWidth(), StrideHeight(),
146                           PoolingWidth(), PoolingHeight(), Activation());
147 
148   const flatbuffers::Offset<tflite::Buffer> null_buffer =
149       tflite::CreateBuffer(builder, builder.CreateVector({}));
150 
151   const std::array<int32_t, 4> input_shape{
152       {BatchSize(), InputHeight(), InputWidth(), Channels()}};
153   const std::array<int32_t, 4> output_shape{
154       {BatchSize(), OutputHeight(), OutputWidth(), Channels()}};
155 
156   const std::array<flatbuffers::Offset<tflite::Tensor>, 2> tensors{{
157       tflite::CreateTensor(
158           builder,
159           builder.CreateVector<int32_t>(input_shape.data(), input_shape.size()),
160           tflite::TensorType_FLOAT32),
161       tflite::CreateTensor(builder,
162                            builder.CreateVector<int32_t>(output_shape.data(),
163                                                          output_shape.size()),
164                            tflite::TensorType_FLOAT32),
165   }};
166 
167   const std::array<int32_t, 1> op_inputs{{0}};
168   const std::array<int32_t, 1> op_outputs{{1}};
169 
170   flatbuffers::Offset<tflite::Operator> op = CreateOperator(
171       builder, /*opcode_index=*/0,
172       builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
173       builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
174       tflite::BuiltinOptions_Pool2DOptions, pool_2d_options.Union());
175 
176   const std::array<int32_t, 1> subgraph_inputs{{0}};
177   const std::array<int32_t, 1> subgraph_outputs{{1}};
178   flatbuffers::Offset<tflite::SubGraph> subgraph = CreateSubGraph(
179       builder, builder.CreateVector(tensors.data(), tensors.size()),
180       builder.CreateVector<int32_t>(subgraph_inputs.data(),
181                                     subgraph_inputs.size()),
182       builder.CreateVector<int32_t>(subgraph_outputs.data(),
183                                     subgraph_outputs.size()),
184       builder.CreateVector(&op, 1));
185 
186   flatbuffers::Offset<flatbuffers::String> description =
187       builder.CreateString("Pool2D model");
188 
189   flatbuffers::Offset<tflite::Model> model_buffer = tflite::CreateModel(
190       builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
191       builder.CreateVector(&subgraph, 1), description,
192       builder.CreateVector(&null_buffer, 1));
193 
194   builder.Finish(model_buffer);
195 
196   return std::vector<char>(builder.GetBufferPointer(),
197                            builder.GetBufferPointer() + builder.GetSize());
198 }
}  // namespace xnnpack
}  // namespace tflite