/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include <fp16.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

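// A sketch of how this tester is typically driven from a test (hypothetical
// example; assumes the TfLiteXNNPackDelegateCreate/TfLiteXNNPackDelegateDelete
// factory functions declared in xnnpack_delegate.h):
//
//   std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
//       xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
//                        TfLiteXNNPackDelegateDelete);
//   BinaryElementwiseTester()
//       .Input1Shape({1, 2, 3})
//       .Input2Shape({1, 2, 3})
//       .Test(BuiltinOperator_ADD, xnnpack_delegate.get());

// Computes the shape produced by broadcasting input1 against input2 under
// NumPy-style rules: the shorter shape is implicitly left-padded with 1s, and
// each output dimension is the larger of the two aligned input dimensions.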
std::vector<int32_t> BinaryElementwiseTester::OutputShape() const {
  std::vector<int32_t> output_shape;
  if (!input1_shape_.empty()) {
    output_shape.insert(
        output_shape.end(), input1_shape_.cbegin(),
        input1_shape_.cbegin() +
            std::max(input1_shape_.size(), input2_shape_.size()) -
            input2_shape_.size());
  }
  if (!input2_shape_.empty()) {
    output_shape.insert(
        output_shape.end(), input2_shape_.cbegin(),
        input2_shape_.cbegin() +
            std::max(input2_shape_.size(), input1_shape_.size()) -
            input1_shape_.size());
  }
  for (size_t i = std::min(input1_shape_.size(), input2_shape_.size()); i >= 1;
       i--) {
    output_shape.push_back(
        std::max(*(input1_shape_.cend() - i), *(input2_shape_.cend() - i)));
  }
  return output_shape;
}

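// Builds a single-operator model for the given binary op, runs it through
// both the default TFLite CPU kernels and the supplied delegate on identical
// random inputs, and verifies that the two outputs agree elementwise.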
void BinaryElementwiseTester::Test(tflite::BuiltinOperator binary_op,
                                   TfLiteDelegate* delegate) const {
  if (Input1Static()) {
    ASSERT_FALSE(Input2Static());
  }
  if (FP16Weights()) {
    ASSERT_TRUE(Input1Static() || Input2Static());
  }

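  // Draw inputs from op-specific ranges: DIV keeps the divisor in [0.1, 1.0]
  // so results stay away from division by (near-)zero, and MUL narrows both
  // ranges so products stay well within float precision.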
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  std::uniform_real_distribution<float> input1_distribution(-25.0f, 25.0f);
  std::uniform_real_distribution<float> input2_distribution(-25.0f, 25.0f);
  switch (binary_op) {
    case BuiltinOperator_DIV:
      input1_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      input2_distribution = std::uniform_real_distribution<float>(0.1f, 1.0f);
      break;
    case BuiltinOperator_MUL:
      input1_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      input2_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      break;
    default:
      break;
  }
  auto input1_rng = std::bind(input1_distribution, std::ref(rng));
  auto input2_rng = std::bind(input2_distribution, std::ref(rng));

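  // Build the model once, then instantiate two interpreters from the same
  // buffer: one to be rewritten by the delegate and one left on the default
  // CPU kernels as a reference.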
  std::vector<char> buffer = CreateTfLiteModel(binary_op);
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  if (Input1Static() || Input2Static()) {
    ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
    ASSERT_EQ(default_interpreter->inputs().size(), 1);
  } else {
    ASSERT_EQ(delegate_interpreter->inputs().size(), 2);
    ASSERT_EQ(default_interpreter->inputs().size(), 2);
  }

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

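  // Fill each dynamic input of the reference interpreter with random data,
  // then copy the identical values into the delegate interpreter.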
  if (!Input1Static()) {
    float* default_input1_data = default_interpreter->typed_tensor<float>(
        default_interpreter->inputs()[0]);
    std::generate(default_input1_data,
                  default_input1_data + ComputeSize(Input1Shape()),
                  std::ref(input1_rng));

    float* xnnpack_input1_data = delegate_interpreter->typed_tensor<float>(
        delegate_interpreter->inputs()[0]);
    std::copy(default_input1_data,
              default_input1_data + ComputeSize(Input1Shape()),
              xnnpack_input1_data);
  }

  if (!Input2Static()) {
    float* default_input2_data = default_interpreter->typed_tensor<float>(
        default_interpreter->inputs()[Input1Static() ? 0 : 1]);
    std::generate(default_input2_data,
                  default_input2_data + ComputeSize(Input2Shape()),
                  std::ref(input2_rng));

    float* xnnpack_input2_data = delegate_interpreter->typed_tensor<float>(
        delegate_interpreter->inputs()[Input1Static() ? 0 : 1]);
    std::copy(default_input2_data,
              default_input2_data + ComputeSize(Input2Shape()),
              xnnpack_input2_data);
  }

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

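  // Compare elementwise with a tolerance of machine epsilon scaled by twice
  // the output magnitude (relative error), floored at one epsilon so results
  // near zero are still checked against an absolute bound.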
  float* default_output_data = default_interpreter->typed_tensor<float>(
      default_interpreter->outputs()[0]);
  float* xnnpack_output_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->outputs()[0]);

  for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
    ASSERT_NEAR(default_output_data[i], xnnpack_output_data[i],
                std::numeric_limits<float>::epsilon() *
                    std::max(std::abs(default_output_data[i]) * 2.0f, 1.0f));
  }
}

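// Serializes a one-operator TFLite model as a FlatBuffer. A static input is
// embedded as a constant buffer; an FP16 or sparse static input additionally
// gets a DEQUANTIZE or DENSIFY operator that materializes the dense FLOAT32
// input tensor at runtime.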
std::vector<char> BinaryElementwiseTester::CreateTfLiteModel(
    tflite::BuiltinOperator binary_op) const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  std::uniform_real_distribution<float> input1_distribution(-25.0f, 25.0f);
  std::uniform_real_distribution<float> input2_distribution(-25.0f, 25.0f);
  switch (binary_op) {
    case BuiltinOperator_DIV:
      input1_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      input2_distribution = std::uniform_real_distribution<float>(0.1f, 1.0f);
      break;
    case BuiltinOperator_MUL:
      input1_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      input2_distribution = std::uniform_real_distribution<float>(-5.0f, 5.0f);
      break;
    default:
      break;
  }
  auto input1_rng = std::bind(input1_distribution, std::ref(rng));
  auto input2_rng = std::bind(input2_distribution, std::ref(rng));

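  // Operator code 0 is always the binary op under test; a DEQUANTIZE or
  // DENSIFY code is appended at index 1 when a static input is stored in FP16
  // or sparse form.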
  flatbuffers::FlatBufferBuilder builder;
  std::vector<flatbuffers::Offset<OperatorCode>> operator_codes{
      {CreateOperatorCode(builder, binary_op)}};
  if (FP16Weights()) {
    operator_codes.emplace_back(
        CreateOperatorCode(builder, BuiltinOperator_DEQUANTIZE));
  } else if (SparseWeights()) {
    operator_codes.emplace_back(
        CreateOperatorCode(builder, BuiltinOperator_DENSIFY));
  }

  std::vector<flatbuffers::Offset<Buffer>> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

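  // Buffer 0 is the mandatory empty buffer; a static input's data is appended
  // after it. The dense input tensor references that buffer directly only in
  // the plain FLOAT32 case; with FP16 or sparse weights it stays at buffer 0
  // and is produced at runtime by DEQUANTIZE/DENSIFY.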
  int32_t input1_buffer = 0;
  if (Input1Static()) {
    if (FP16Weights()) {
      std::vector<uint16_t> input1_data(ComputeSize(Input1Shape()));
      std::generate(input1_data.begin(), input1_data.end(),
                    std::bind(fp16_ieee_from_fp32_value, input1_rng));

      buffers.push_back(CreateBuffer(
          builder, builder.CreateVector(
                       reinterpret_cast<const uint8_t*>(input1_data.data()),
                       sizeof(uint16_t) * input1_data.size())));
    } else {
      std::vector<float> input1_data(ComputeSize(Input1Shape()));
      std::generate(input1_data.begin(), input1_data.end(), input1_rng);

      if (!SparseWeights()) {
        input1_buffer = buffers.size();
      }
      buffers.push_back(CreateBuffer(
          builder, builder.CreateVector(
                       reinterpret_cast<const uint8_t*>(input1_data.data()),
                       sizeof(float) * input1_data.size())));
    }
  }

  int32_t input2_buffer = 0;
  if (Input2Static()) {
    if (FP16Weights()) {
      std::vector<uint16_t> input2_data(ComputeSize(Input2Shape()));
      std::generate(input2_data.begin(), input2_data.end(),
                    std::bind(fp16_ieee_from_fp32_value, input2_rng));

      buffers.push_back(CreateBuffer(
          builder, builder.CreateVector(
                       reinterpret_cast<const uint8_t*>(input2_data.data()),
                       sizeof(uint16_t) * input2_data.size())));
    } else {
      std::vector<float> input2_data(ComputeSize(Input2Shape()));
      std::generate(input2_data.begin(), input2_data.end(), input2_rng);

      if (!SparseWeights()) {
        input2_buffer = buffers.size();
      }
      buffers.push_back(CreateBuffer(
          builder, builder.CreateVector(
                       reinterpret_cast<const uint8_t*>(input2_data.data()),
                       sizeof(float) * input2_data.size())));
    }
  }

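  // Tensor layout: an optional FP16 or sparse source tensor (index 0) comes
  // first; the dense input1, input2, and output tensors always occupy the
  // last three slots.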
  const std::vector<int32_t> output_shape = OutputShape();
  std::vector<flatbuffers::Offset<Tensor>> tensors;
  std::vector<flatbuffers::Offset<Operator>> operators;
  if (FP16Weights() && Input1Static()) {
    tensors.emplace_back(
        CreateTensor(builder,
                     builder.CreateVector<int32_t>(Input1Shape().data(),
                                                   Input1Shape().size()),
                     TensorType_FLOAT16, 1));
  } else if (SparseWeights() && Input1Static()) {
    int dims_count = Input1Shape().size();
    std::vector<flatbuffers::Offset<DimensionMetadata>> dim_metadata(
        dims_count);
    std::vector<int> traversal_order(dims_count);
    for (int i = 0; i < dims_count; i++) {
      traversal_order[i] = i;
      dim_metadata[i] = CreateDimensionMetadata(builder, DimensionType_DENSE,
                                                Input1Shape()[i]);
    }
    flatbuffers::Offset<SparsityParameters> sparsity_param =
        CreateSparsityParameters(builder, builder.CreateVector(traversal_order),
                                 0, builder.CreateVector(dim_metadata));
    tensors.emplace_back(CreateTensor(
        builder,
        builder.CreateVector<int32_t>(Input1Shape().data(),
                                      Input1Shape().size()),
        TensorType_FLOAT32, /*buffer=*/1, /*name=*/0, /*quantization=*/0,
        /*is_variable=*/false, /*sparsity=*/sparsity_param));
  }
  if (FP16Weights() && Input2Static()) {
    tensors.emplace_back(
        CreateTensor(builder,
                     builder.CreateVector<int32_t>(Input2Shape().data(),
                                                   Input2Shape().size()),
                     TensorType_FLOAT16, 1));
  } else if (SparseWeights() && Input2Static()) {
    int dims_count = Input2Shape().size();
    std::vector<flatbuffers::Offset<DimensionMetadata>> dim_metadata(
        dims_count);
    std::vector<int> traversal_order(dims_count);
    for (int i = 0; i < dims_count; i++) {
      traversal_order[i] = i;
      dim_metadata[i] = CreateDimensionMetadata(builder, DimensionType_DENSE,
                                                Input2Shape()[i]);
    }
    flatbuffers::Offset<SparsityParameters> sparsity_param =
        CreateSparsityParameters(builder, builder.CreateVector(traversal_order),
                                 0, builder.CreateVector(dim_metadata));
    tensors.emplace_back(CreateTensor(
        builder,
        builder.CreateVector<int32_t>(Input2Shape().data(),
                                      Input2Shape().size()),
        TensorType_FLOAT32, /*buffer=*/1, /*name=*/0, /*quantization=*/0,
        /*is_variable=*/false, /*sparsity=*/sparsity_param));
  }
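  // When a static input is FP16 or sparse, tensor 0 holds the raw weights and
  // a DEQUANTIZE or DENSIFY operator expands it into the corresponding dense
  // input tensor before the binary op runs.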
  if (FP16Weights()) {
    const std::array<int32_t, 1> dequantize_inputs{{0}};
    const std::array<int32_t, 1> dequantize_outputs{{Input1Static() ? 1 : 2}};
    operators.emplace_back(CreateOperator(
        builder, /*opcode_index=*/1,
        builder.CreateVector<int32_t>(dequantize_inputs.data(),
                                      dequantize_inputs.size()),
        builder.CreateVector<int32_t>(dequantize_outputs.data(),
                                      dequantize_outputs.size())));
  } else if (SparseWeights()) {
    const std::array<int32_t, 1> densify_inputs{{0}};
    const std::array<int32_t, 1> densify_outputs{{Input1Static() ? 1 : 2}};
    operators.emplace_back(
        CreateOperator(builder, /*opcode_index=*/1,
                       builder.CreateVector<int32_t>(densify_inputs.data(),
                                                     densify_inputs.size()),
                       builder.CreateVector<int32_t>(densify_outputs.data(),
                                                     densify_outputs.size())));
  }
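  // The dense input1, input2, and output tensors. A dynamic or FP16/sparse
  // input references buffer 0; a plain static input references its constant
  // data buffer.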
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(Input1Shape().data(), Input1Shape().size()),
      TensorType_FLOAT32, input1_buffer));
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(Input2Shape().data(), Input2Shape().size()),
      TensorType_FLOAT32, input2_buffer));
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(output_shape.data(), output_shape.size()),
      TensorType_FLOAT32));

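  // ADD/DIV/MUL/SUB carry the fused activation in their op-specific builtin
  // options; other binary ops have no options table, so the test requires
  // Activation() to be NONE for them.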
  tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE;
  flatbuffers::Offset<void> builtin_options = 0;
  switch (binary_op) {
    case BuiltinOperator_ADD:
      builtin_options_type = BuiltinOptions_AddOptions;
      builtin_options = CreateAddOptions(builder, Activation()).Union();
      break;
    case BuiltinOperator_DIV:
      builtin_options_type = BuiltinOptions_DivOptions;
      builtin_options = CreateDivOptions(builder, Activation()).Union();
      break;
    case BuiltinOperator_MUL:
      builtin_options_type = BuiltinOptions_MulOptions;
      builtin_options = CreateMulOptions(builder, Activation()).Union();
      break;
    case BuiltinOperator_SUB:
      builtin_options_type = BuiltinOptions_SubOptions;
      builtin_options = CreateSubOptions(builder, Activation()).Union();
      break;
    default:
      EXPECT_EQ(Activation(), ActivationFunctionType_NONE);
  }

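  // The binary op always consumes the last three tensors: the two dense
  // inputs and the output.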
  const std::array<int32_t, 2> op_inputs{
      {static_cast<int>(tensors.size()) - 3,
       static_cast<int>(tensors.size()) - 2}};
  const std::array<int32_t, 1> op_outputs{
      {static_cast<int>(tensors.size()) - 1}};
  operators.emplace_back(CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
      builtin_options_type, builtin_options));

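  // Only dynamic (non-static) inputs are exposed as subgraph inputs.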
  std::vector<int32_t> subgraph_inputs;
  if (!Input1Static()) {
    subgraph_inputs.push_back(tensors.size() - 3);
  }
  if (!Input2Static()) {
    subgraph_inputs.push_back(tensors.size() - 2);
  }
  const std::array<int32_t, 1> subgraph_outputs{
      {static_cast<int>(tensors.size()) - 1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(operators.data(), operators.size()));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Binary operator model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION,
      builder.CreateVector(operator_codes.data(), operator_codes.size()),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

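// Returns the number of elements in a tensor of the given shape (the product
// of its dimensions; 1 for a scalar/empty shape).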
int32_t BinaryElementwiseTester::ComputeSize(
    const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite