• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
34 
35 namespace tflite {
36 namespace xnnpack {
37 
Test(tflite::BuiltinOperator unary_op,TfLiteDelegate * delegate) const38 void UnaryElementwiseTester::Test(tflite::BuiltinOperator unary_op,
39                                   TfLiteDelegate* delegate) const {
40   std::random_device random_device;
41   auto rng = std::mt19937(random_device());
42   std::uniform_real_distribution<float> input_distribution(-15.0f, 15.0f);
43   switch (unary_op) {
44     case BuiltinOperator_SQRT:
45       input_distribution = std::uniform_real_distribution<float>(0.0f, 10.0f);
46       break;
47     default:
48       break;
49   }
50   auto input_rng = std::bind(input_distribution, std::ref(rng));
51 
52   std::vector<char> buffer = CreateTfLiteModel(unary_op);
53   const Model* model = GetModel(buffer.data());
54 
55   std::unique_ptr<Interpreter> delegate_interpreter;
56   ASSERT_EQ(
57       InterpreterBuilder(
58           model,
59           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
60           &delegate_interpreter),
61       kTfLiteOk);
62   std::unique_ptr<Interpreter> default_interpreter;
63   ASSERT_EQ(
64       InterpreterBuilder(
65           model,
66           ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
67           &default_interpreter),
68       kTfLiteOk);
69 
70   ASSERT_TRUE(delegate_interpreter);
71   ASSERT_TRUE(default_interpreter);
72 
73   ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
74   ASSERT_EQ(default_interpreter->inputs().size(), 1);
75 
76   ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
77   ASSERT_EQ(default_interpreter->outputs().size(), 1);
78 
79   ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
80   ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);
81 
82   ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
83 
84   float* default_input_data = default_interpreter->typed_input_tensor<float>(0);
85   std::generate(default_input_data, default_input_data + Size(),
86                 std::ref(input_rng));
87 
88   float* delegate_input_data =
89       delegate_interpreter->typed_input_tensor<float>(0);
90   std::copy(default_input_data, default_input_data + Size(),
91             delegate_input_data);
92 
93   ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
94   ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);
95 
96   float* default_output_data =
97       default_interpreter->typed_output_tensor<float>(0);
98   float* delegate_output_data =
99       delegate_interpreter->typed_output_tensor<float>(0);
100 
101   switch (unary_op) {
102     case BuiltinOperator_ABS:
103     case BuiltinOperator_CEIL:
104     case BuiltinOperator_FLOOR:
105     case BuiltinOperator_NEG:
106     case BuiltinOperator_RELU:
107     case BuiltinOperator_RELU_N1_TO_1:
108     case BuiltinOperator_RELU6:
109     case BuiltinOperator_ROUND:
110     case BuiltinOperator_SQUARE:
111     case BuiltinOperator_SQRT:
112       for (size_t i = 0; i < Size(); i++) {
113         ASSERT_EQ(default_output_data[i], delegate_output_data[i]);
114       }
115       break;
116     default:
117       for (size_t i = 0; i < Size(); i++) {
118         ASSERT_NEAR(
119             default_output_data[i], delegate_output_data[i],
120             std::numeric_limits<float>::epsilon() *
121                 std::max(std::abs(default_output_data[i]) * RelativeTolerance(),
122                          1.0f));
123       }
124       break;
125   }
126 }
127 
CreateTfLiteModel(tflite::BuiltinOperator unary_op) const128 std::vector<char> UnaryElementwiseTester::CreateTfLiteModel(
129     tflite::BuiltinOperator unary_op) const {
130   flatbuffers::FlatBufferBuilder builder;
131   flatbuffers::Offset<OperatorCode> operator_code =
132       CreateOperatorCode(builder, unary_op);
133 
134   const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
135       CreateBuffer(builder, builder.CreateVector({})),
136   }};
137 
138   const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
139       CreateTensor(
140           builder,
141           builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
142           TensorType_FLOAT32),
143       CreateTensor(
144           builder,
145           builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
146           TensorType_FLOAT32),
147   }};
148 
149   const std::array<int32_t, 1> op_inputs{{0}};
150   const std::array<int32_t, 1> op_outputs{{1}};
151   flatbuffers::Offset<Operator> op = CreateOperator(
152       builder, /*opcode_index=*/0,
153       builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
154       builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));
155 
156   const std::array<int32_t, 1> subgraph_inputs{{0}};
157   const std::array<int32_t, 1> subgraph_outputs{{1}};
158   flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
159       builder, builder.CreateVector(tensors.data(), tensors.size()),
160       builder.CreateVector<int32_t>(subgraph_inputs.data(),
161                                     subgraph_inputs.size()),
162       builder.CreateVector<int32_t>(subgraph_outputs.data(),
163                                     subgraph_outputs.size()),
164       builder.CreateVector(&op, 1));
165 
166   flatbuffers::Offset<flatbuffers::String> description =
167       builder.CreateString("Unary operator model");
168 
169   flatbuffers::Offset<Model> model_buffer = CreateModel(
170       builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
171       builder.CreateVector(&subgraph, 1), description,
172       builder.CreateVector(buffers.data(), buffers.size()));
173 
174   builder.Finish(model_buffer);
175 
176   return std::vector<char>(builder.GetBufferPointer(),
177                            builder.GetBufferPointer() + builder.GetSize());
178 }
179 
ComputeSize(const std::vector<int32_t> & shape)180 int32_t UnaryElementwiseTester::ComputeSize(const std::vector<int32_t>& shape) {
181   return std::accumulate(shape.cbegin(), shape.cend(), 1,
182                          std::multiplies<int32_t>());
183 }
184 
185 }  // namespace xnnpack
186 }  // namespace tflite
187