/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/quantized_leaky_relu_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

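// Runs identical random inputs through the default interpreter and the
// delegate-backed interpreter, then checks that the quantized outputs agree.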
template <class T>
void QuantizedLeakyReluTester::Test(Interpreter* delegate_interpreter,
                                    Interpreter* default_interpreter) const {
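  // Draw inputs uniformly over the full representable range of T.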
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  std::uniform_int_distribution<int32_t> input_distribution(
      std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
  auto input_rng = std::bind(input_distribution, std::ref(rng));

  T* default_input_data = default_interpreter->typed_input_tensor<T>(0);
  std::generate(default_input_data, default_input_data + ComputeSize(Shape()),
                std::ref(input_rng));

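  // Feed the delegate interpreter the exact same inputs so the two runs are
  // directly comparable.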
  T* xnnpack_input_data = delegate_interpreter->typed_input_tensor<T>(0);
  std::copy(default_input_data, default_input_data + ComputeSize(Shape()),
            xnnpack_input_data);

  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  T* default_output_data = default_interpreter->typed_output_tensor<T>(0);
  T* delegate_output_data = delegate_interpreter->typed_output_tensor<T>(0);

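  // Quantized implementations may round differently, so tolerate a mismatch
  // of at most one quantization step per element.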
  for (size_t i = 0; i < ComputeSize(Shape()); i++) {
    ASSERT_LE(std::abs(static_cast<int32_t>(default_output_data[i]) -
                       static_cast<int32_t>(delegate_output_data[i])),
              1)
        << "default " << static_cast<int32_t>(default_output_data[i])
        << ", delegate " << static_cast<int32_t>(delegate_output_data[i])
        << " at index " << i << " / " << ComputeSize(Shape());
  }
}

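// Builds the single-operator model, instantiates it in two interpreters (one
// with the delegate applied, one without), and dispatches to the typed Test()
// overload matching the tester's quantization signedness.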
void QuantizedLeakyReluTester::Test(TfLiteDelegate* delegate) const {
  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

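  // Only the interpreter under test gets the delegate; the other one serves
  // as the reference implementation.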
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

  if (Unsigned()) {
    Test<uint8_t>(delegate_interpreter.get(), default_interpreter.get());
  } else {
    Test<int8_t>(delegate_interpreter.get(), default_interpreter.get());
  }
}

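// Serializes a one-operator LEAKY_RELU model as a TFLite FlatBuffer: one
// quantized input tensor, one quantized output tensor, and the negative slope
// stored in the operator's options.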
std::vector<char> QuantizedLeakyReluTester::CreateTfLiteModel() const {
  flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> operator_code =
      CreateOperatorCode(builder, BuiltinOperator_LEAKY_RELU);

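  // The model has no constant tensors, so the only buffer is the empty
  // buffer 0 that the TFLite schema reserves for "no data".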
  const std::array<flatbuffers::Offset<Buffer>, 1> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

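  // Input (tensor 0) and output (tensor 1) share the same shape and type but
  // carry independent scale/zero-point quantization parameters.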
  const std::array<flatbuffers::Offset<Tensor>, 2> tensors{{
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          Unsigned() ? TensorType_UINT8 : TensorType_INT8,
          /*buffer=*/0, /*name=*/0,
          CreateQuantizationParameters(
              builder, /*min=*/0, /*max=*/0,
              builder.CreateVector<float>({InputScale()}),
              builder.CreateVector<int64_t>({InputZeroPoint()}))),
      CreateTensor(
          builder,
          builder.CreateVector<int32_t>(Shape().data(), Shape().size()),
          Unsigned() ? TensorType_UINT8 : TensorType_INT8,
          /*buffer=*/0, /*name=*/0,
          CreateQuantizationParameters(
              builder, /*min=*/0, /*max=*/0,
              builder.CreateVector<float>({OutputScale()}),
              builder.CreateVector<int64_t>({OutputZeroPoint()}))),
  }};

  const flatbuffers::Offset<LeakyReluOptions> leaky_relu_options =
      CreateLeakyReluOptions(builder, NegativeSlope());

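  // Wire the graph: tensor 0 -> LEAKY_RELU -> tensor 1, with the same tensors
  // exposed as the subgraph's input and output.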
  const std::array<int32_t, 1> op_inputs{{0}};
  const std::array<int32_t, 1> op_outputs{{1}};
  flatbuffers::Offset<Operator> op = CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size()),
      BuiltinOptions_LeakyReluOptions, leaky_relu_options.Union());

  const std::array<int32_t, 1> subgraph_inputs{{0}};
  const std::array<int32_t, 1> subgraph_outputs{{1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(&op, 1));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("Quantized unary operator model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION, builder.CreateVector(&operator_code, 1),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

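// Returns the number of elements in a tensor of the given shape.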
int32_t QuantizedLeakyReluTester::ComputeSize(
    const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite