/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/quantized_unary_elementwise_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

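// Runs the same random quantized input through the delegate-backed and the
// reference (default) interpreters and checks that their outputs agree
// element-wise within a tolerance of one quantized unit.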
template <class T>
void QuantizedUnaryElementwiseTester::Test(
    Interpreter* delegate_interpreter, Interpreter* default_interpreter) const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  std::uniform_int_distribution<int32_t> input_distribution(
      std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
  auto input_rng = std::bind(input_distribution, std::ref(rng));

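  // Fill the reference interpreter's input tensor with the random values.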
  T* default_input_data = default_interpreter->typed_input_tensor<T>(0);
  std::generate(default_input_data, default_input_data + ComputeSize(Shape()),
                std::ref(input_rng));

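  // Copy the identical input into the delegate interpreter so both run on the
  // same data.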
  T* xnnpack_input_data = delegate_interpreter->typed_input_tensor<T>(0);
  std::copy(default_input_data, default_input_data + ComputeSize(Shape()),
            xnnpack_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  T* default_output_data = default_interpreter->typed_output_tensor<T>(0);
  T* delegate_output_data = delegate_interpreter->typed_output_tensor<T>(0);

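  // The delegate's output may differ from the reference by at most one
  // quantized unit per element.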
  for (size_t i = 0; i < ComputeSize(Shape()); i++) {
    ASSERT_LE(std::abs(static_cast<int32_t>(default_output_data[i]) -
                       static_cast<int32_t>(delegate_output_data[i])),
              1)
        << "default " << static_cast<int32_t>(default_output_data[i])
        << ", delegate " << static_cast<int32_t>(delegate_output_data[i])
        << " at index " << i << " / " << ComputeSize(Shape());
  }
}

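// Builds a single-operator model, instantiates a delegate-backed and a
// reference interpreter for it, applies the delegate to the former, and runs
// the element-wise comparison with the element type matching the tensors.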
void QuantizedUnaryElementwiseTester::Test(tflite::BuiltinOperator unary_op,
                                           TfLiteDelegate* delegate) const {
  std::vector<char> buffer = CreateTfLiteModel(unary_op);
  const Model* model = GetModel(buffer.data());

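  // Create two interpreters from the same model: one will have the delegate
  // applied, the other runs the built-in reference kernels.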
  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

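  // Only the delegate interpreter gets the delegate under test applied.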
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  if (Unsigned()) {
    Test<uint8_t>(delegate_interpreter.get(), default_interpreter.get());
  } else {
    Test<int8_t>(delegate_interpreter.get(), default_interpreter.get());
  }
}

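// Serializes a minimal TFLite flatbuffer model containing a single unary
// operator with one quantized input tensor and one quantized output tensor.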
std::vector<char> QuantizedUnaryElementwiseTester::CreateTfLiteModel(
    tflite::BuiltinOperator unary_op) const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, unary_op);

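  // Buffer 0 is the empty sentinel buffer that the TFLite schema reserves for
  // tensors without constant data.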
  const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

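  // Tensor 0 is the input, tensor 1 the output; both share the same shape and
  // element type but carry their own per-tensor scale and zero point.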
  const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          Unsigned() ? TensorType_UINT8 : TensorType_INT8,
          /*buffer=*/0, /*name=*/0,
          CreateQuantizationParameters(
              builder, /*min=*/0, /*max=*/0,
              builder.CreateVector<float>({InputScale()}),
              builder.CreateVector<int64_t>({InputZeroPoint()}))),
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          Unsigned() ? TensorType_UINT8 : TensorType_INT8,
          /*buffer=*/0, /*name=*/0,
          CreateQuantizationParameters(
              builder, /*min=*/0, /*max=*/0,
              builder.CreateVector<float>({OutputScale()}),
              builder.CreateVector<int64_t>({OutputZeroPoint()}))),
  }};

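  // Wire the single operator: it reads tensor 0 and writes tensor 1, and the
  // subgraph exposes the same tensors as model input and output.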
  const std::array<int32_t, 1> op_inputs{{0}};
  const std::array<int32_t, 1> op_outputs{{1}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()));

  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Quantized unary operator model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

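// Returns the number of elements in a tensor of the given shape, i.e. the
// product of all dimensions.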
int32_t QuantizedUnaryElementwiseTester::ComputeSize(
    const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite