/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/dequantize_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

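// Runs the same randomly generated quantized input through both interpreters
// and checks that the dequantized float outputs are identical.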
template <class T>
void DequantizeTester::Test(Interpreter* delegate_interpreter,
                            Interpreter* default_interpreter) const {
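  // Draw random input values covering the full range of the quantized type T.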
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  std::uniform_int_distribution<int32_t> input_distribution(
      std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
  auto input_rng = std::bind(input_distribution, std::ref(rng));

  T* default_input_data = default_interpreter->typed_input_tensor<T>(0);
  std::generate(default_input_data, default_input_data + ComputeSize(Shape()),
                std::ref(input_rng));

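  // Mirror the reference input into the delegated interpreter so both
  // interpreters operate on identical data.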
  T* delegate_input_data = delegate_interpreter->typed_input_tensor<T>(0);
  std::copy(default_input_data, default_input_data + ComputeSize(Shape()),
            delegate_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  float* default_output_data =
      default_interpreter->typed_output_tensor<float>(0);
  float* delegate_output_data =
      delegate_interpreter->typed_output_tensor<float>(0);

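  // Dequantization has a single well-defined float result per input value,
  // so the delegated output is required to match the reference exactly,
  // element by element.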
  for (size_t i = 0; i < ComputeSize(Shape()); i++) {
    ASSERT_EQ(default_output_data[i], delegate_output_data[i])
        << " at index " << i << " / " << ComputeSize(Shape());
  }
}

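// Builds a single-operator DEQUANTIZE model, creates two interpreters from
// it, applies the delegate to one of them, and runs the element-wise
// comparison for the configured quantized type.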
void DequantizeTester::Test(TfLiteDelegate* delegate) const {
  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

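  // Apply the delegate to one interpreter only; the other remains the
  // reference implementation.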
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  if (Unsigned()) {
    Test<uint8_t>(delegate_interpreter.get(), default_interpreter.get());
  } else {
    Test<int8_t>(delegate_interpreter.get(), default_interpreter.get());
  }
}

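// Serializes a FlatBuffer model containing a single DEQUANTIZE operator that
// maps a quantized input tensor to a float output tensor of the same shape.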
std::vector<char> DequantizeTester::CreateTfLiteModel() const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, BuiltinOperator_DEQUANTIZE);

  const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

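  // Tensor 0: quantized input (uint8 or int8) with per-tensor scale and
  // zero-point; tensor 1: float32 output of the same shape.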
  const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          Unsigned() ? TensorType_UINT8 : TensorType_INT8,
          /*buffer=*/0, /*name=*/0,
          CreateQuantizationParameters(
              builder, /*min=*/0, /*max=*/0,
              builder.CreateVector<float>({InputScale()}),
              builder.CreateVector<int64_t>({InputZeroPoint()}))),
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          TensorType_FLOAT32),
  }};

  const std::array<int32_t, 1> op_inputs{{0}};
  const std::array<int32_t, 1> op_outputs{{1}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));

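  // Expose tensor 0 as the subgraph input and tensor 1 as its output.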
  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Dequantize operator model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

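// Returns the number of elements in a tensor of the given shape, i.e. the
// product of its dimensions.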
int32_t DequantizeTester::ComputeSize(const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite
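
// Example usage (a minimal sketch, not part of this file): exercising the
// tester against the XNNPACK delegate from a gtest case. The fluent setters
// Shape() and Unsigned() are assumed to exist alongside the getters used
// above; the delegate lifecycle calls are the public XNNPACK delegate C API
// from tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h.
//
//   TEST(Dequantize, Unsigned4D) {
//     TfLiteXNNPackDelegateOptions delegate_options =
//         TfLiteXNNPackDelegateOptionsDefault();
//     std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
//         xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
//                          TfLiteXNNPackDelegateDelete);
//
//     DequantizeTester()
//         .Shape({2, 3, 4, 5})
//         .Unsigned(true)
//         .Test(xnnpack_delegate.get());
//   }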