• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
#include "tensorflow/lite/delegates/xnnpack/reduce_tester.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
34 
35 namespace tflite {
36 namespace xnnpack {
37 
Test(tflite::BuiltinOperator reduce_op,TfLiteDelegate * delegate) const38 void ReduceTester::Test(tflite::BuiltinOperator reduce_op,
39                         TfLiteDelegate* delegate) const {
40   std::random_device random_device;
41   auto rng = std::mt19937(random_device());
42   auto input_rng =
43       std::bind(std::uniform_real_distribution<float>(), std::ref(rng));
44 
45   std::vector<char> buffer = CreateTfLiteModel(reduce_op);
46   const Model* model = GetModel(buffer.data());
47 
48   std::unique_ptr<Interpreter> delegate_interpreter;
49   ASSERT_EQ(
50       InterpreterBuilder(
51           model,
52           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
53           &delegate_interpreter),
54       kTfLiteOk);
55   std::unique_ptr<Interpreter> default_interpreter;
56   ASSERT_EQ(
57       InterpreterBuilder(
58           model,
59           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
60           &default_interpreter),
61       kTfLiteOk);
62 
63   ASSERT_TRUE(delegate_interpreter);
64   ASSERT_TRUE(default_interpreter);
65 
66   ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
67   ASSERT_EQ(default_interpreter->inputs().size(), 1);
68 
69   ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
70   ASSERT_EQ(default_interpreter->outputs().size(), 1);
71 
72   ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
73   ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
74 
75   ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
76 
77   float* default_input_data = default_interpreter->typed_input_tensor<float>(0);
78   std::generate(default_input_data, default_input_data + InputSize(),
79                 std::ref(input_rng));
80 
81   float* delegate_input_data =
82       delegate_interpreter->typed_input_tensor<float>(0);
83   std::copy(default_input_data, default_input_data + InputSize(),
84             delegate_input_data);
85 
86   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
87   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
88 
89   float* default_output_data =
90       default_interpreter->typed_output_tensor<float>(0);
91   float* delegate_output_data =
92       delegate_interpreter->typed_output_tensor<float>(0);
93 
94   const int32_t output_size = OutputSize();
95   for (size_t i = 0; i < output_size; i++) {
96     ASSERT_NEAR(
97         default_output_data[i], delegate_output_data[i],
98         std::numeric_limits<float>::epsilon() *
99             std::max(std::abs(default_output_data[i]) * RelativeTolerance(),
100                      1.0f));
101   }
102 }
103 
CreateTfLiteModel(tflite::BuiltinOperator reduce_op) const104 std::vector<char> ReduceTester::CreateTfLiteModel(
105     tflite::BuiltinOperator reduce_op) const {
106   flatbuffers::FlatBufferBuilder builder;
107   flatbuffers::Offset<OperatorCode> operator_code =
108       CreateOperatorCode(builder, reduce_op);
109 
110   const std::array<flatbuffers::Offset<Buffer>, 2> buffers{{
111       CreateBuffer(builder, builder.CreateVector({})),
112       CreateBuffer(builder, builder.CreateVector(
113                                 reinterpret_cast<const uint8_t*>(Axes().data()),
114                                 sizeof(int32_t) * Axes().size())),
115   }};
116 
117   const std::vector<int32_t> output_shape = OutputShape();
118   const std::array<int32_t, 1> axes_shape{
119       {static_cast<int32_t>(Axes().size())}};
120   const std::array<flatbuffers::Offset<Tensor>, 3> tensors{{
121       CreateTensor(builder,
122                    builder.CreateVector<int32_t>(InputShape().data(),
123                                                  InputShape().size()),
124                    TensorType_FLOAT32),
125       CreateTensor(
126           builder,
127           builder.CreateVector<int32_t>(axes_shape.data(), axes_shape.size()),
128           TensorType_INT32, /*buffer=*/1),
129       CreateTensor(builder,
130                    builder.CreateVector<int32_t>(output_shape.data(),
131                                                  output_shape.size()),
132                    TensorType_FLOAT32),
133   }};
134 
135   const flatbuffers::Offset<ReducerOptions> reducer_options =
136       CreateReducerOptions(builder, KeepDims());
137 
138   const std::array<int32_t, 2> op_inputs{{0, 1}};
139   const std::array<int32_t, 1> op_outputs{{2}};
140   flatbuffers::Offset<Operator> op = CreateOperator(
141       builder, /*opcode_index=*/0,
142       builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
143       builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
144       tflite::BuiltinOptions_ReducerOptions, reducer_options.Union());
145 
146   const std::array<int32_t, 1> subgraph_inputs{{0}};
147   const std::array<int32_t, 1> subgraph_outputs{{2}};
148   flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
149       builder, builder.CreateVector(tensors.data(), tensors.size()),
150       builder.CreateVector<int32_t>(subgraph_inputs.data(),
151                                     subgraph_inputs.size()),
152       builder.CreateVector<int32_t>(subgraph_outputs.data(),
153                                     subgraph_outputs.size()),
154       builder.CreateVector(&op, 1));
155 
156   flatbuffers::Offset<flatbuffers::String> description =
157       builder.CreateString("Reduce model");
158 
159   flatbuffers::Offset<Model> model_buffer = CreateModel(
160       builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
161       builder.CreateVector(&subgraph, 1), description,
162       builder.CreateVector(buffers.data(), buffers.size()));
163 
164   builder.Finish(model_buffer);
165 
166   return std::vector<char>(builder.GetBufferPointer(),
167                            builder.GetBufferPointer() + builder.GetSize());
168 }
169 
ComputeSize(const std::vector<int32_t> & shape)170 int32_t ReduceTester::ComputeSize(const std::vector<int32_t>& shape) {
171   return std::accumulate(shape.cbegin(), shape.cend(), 1,
172                          std::multiplies<int32_t>());
173 }
174 
175 }  // namespace xnnpack
176 }  // namespace tflite
177