/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/prelu_tester.h"

#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <gtest/gtest.h>
#include <fp16.h>
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace xnnpack {

void PreluTester::Test(TfLiteDelegate* delegate) const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto input_rng = std::bind(std::uniform_real_distribution<float>(-1.0f, 1.0f),
                             std::ref(rng));

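  // Build a one-operator PReLU model, then construct two interpreters from it:
  // one that will be modified with the delegate under test and one that keeps
  // the reference (built-in) kernels.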
  std::vector<char> buffer = CreateTfLiteModel();
  const Model* model = GetModel(buffer.data());

  std::unique_ptr<Interpreter> delegate_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &delegate_interpreter),
      kTfLiteOk);
  std::unique_ptr<Interpreter> default_interpreter;
  ASSERT_EQ(
      InterpreterBuilder(
          model,
          ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates())(
          &default_interpreter),
      kTfLiteOk);

  ASSERT_TRUE(delegate_interpreter);
  ASSERT_TRUE(default_interpreter);

  ASSERT_EQ(delegate_interpreter->inputs().size(), 1);
  ASSERT_EQ(default_interpreter->inputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->outputs().size(), 1);
  ASSERT_EQ(default_interpreter->outputs().size(), 1);

  ASSERT_EQ(delegate_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(default_interpreter->AllocateTensors(), kTfLiteOk);

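  // Apply the delegate only to the delegate interpreter; the default
  // interpreter continues to run the reference kernels.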
  ASSERT_EQ(delegate_interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);

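  // Fill the reference input with random values and copy the same values into
  // the delegated interpreter's input tensor.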
  float* default_input_data = default_interpreter->typed_tensor<float>(
      default_interpreter->inputs()[0]);
  std::generate(default_input_data,
                default_input_data + ComputeSize(InputShape()),
                std::ref(input_rng));

  float* xnnpack_input_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->inputs()[0]);
  std::copy(default_input_data, default_input_data + ComputeSize(InputShape()),
            xnnpack_input_data);

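  // Run both interpreters on identical inputs.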
  ASSERT_EQ(default_interpreter->Invoke(), kTfLiteOk);
  ASSERT_EQ(delegate_interpreter->Invoke(), kTfLiteOk);

  float* default_output_data = default_interpreter->typed_tensor<float>(
      default_interpreter->outputs()[0]);
  float* xnnpack_output_data = delegate_interpreter->typed_tensor<float>(
      delegate_interpreter->outputs()[0]);

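  // The delegated PReLU is expected to match the reference output exactly.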
  for (size_t i = 0; i < ComputeSize(OutputShape()); i++) {
    ASSERT_EQ(default_output_data[i], xnnpack_output_data[i]);
  }
}

std::vector<char> PreluTester::CreateTfLiteModel() const {
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto slope_rng = std::bind(std::uniform_real_distribution<float>(0.25f, 0.5f),
                             std::ref(rng));

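  // Register the PRELU opcode, plus DEQUANTIZE when the slope is stored as
  // FP16 or DENSIFY when it is stored in sparse form.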
  flatbuffers::FlatBufferBuilder builder;
  std::vector<flatbuffers::Offset<OperatorCode>> operator_codes{
      {CreateOperatorCode(builder, BuiltinOperator_PRELU)}};
  if (FP16Weights()) {
    operator_codes.emplace_back(
        CreateOperatorCode(builder, BuiltinOperator_DEQUANTIZE));
  } else if (SparseWeights()) {
    operator_codes.emplace_back(
        CreateOperatorCode(builder, BuiltinOperator_DENSIFY));
  }

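  // Buffer 0 is the mandatory empty buffer; buffer 1 holds the randomly
  // generated slope values, stored as FP16 or FP32.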
  std::vector<flatbuffers::Offset<Buffer>> buffers{{
      CreateBuffer(builder, builder.CreateVector({})),
  }};

  if (FP16Weights()) {
    std::vector<uint16_t> slope_data(ComputeSize(SlopeShape()));
    std::generate(slope_data.begin(), slope_data.end(),
                  std::bind(fp16_ieee_from_fp32_value, slope_rng));

    buffers.push_back(CreateBuffer(
        builder, builder.CreateVector(
                     reinterpret_cast<const uint8_t*>(slope_data.data()),
                     sizeof(uint16_t) * slope_data.size())));
  } else {
    std::vector<float> slope_data(ComputeSize(SlopeShape()));
    std::generate(slope_data.begin(), slope_data.end(), slope_rng);

    buffers.push_back(CreateBuffer(
        builder, builder.CreateVector(
                     reinterpret_cast<const uint8_t*>(slope_data.data()),
                     sizeof(float) * slope_data.size())));
  }

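  // For FP16 or sparse weights, tensor 0 is the static slope tensor backed by
  // buffer 1; a DEQUANTIZE/DENSIFY operator below expands it into the dense
  // FP32 slope tensor consumed by PRELU.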
  std::vector<flatbuffers::Offset<Tensor>> tensors;
  std::vector<flatbuffers::Offset<Operator>> operators;
  if (FP16Weights()) {
    tensors.emplace_back(CreateTensor(
        builder,
        builder.CreateVector<int32_t>(SlopeShape().data(), SlopeShape().size()),
        TensorType_FLOAT16, /*buffer=*/1));
  } else if (SparseWeights()) {
    const int dims_count = SlopeShape().size();
    std::vector<flatbuffers::Offset<DimensionMetadata>> dim_metadata(
        dims_count);
    std::vector<int> traversal_order(dims_count);
    for (int i = 0; i < dims_count; i++) {
      traversal_order[i] = i;
      dim_metadata[i] = CreateDimensionMetadata(builder, DimensionType_DENSE,
                                                SlopeShape()[i]);
    }
    const flatbuffers::Offset<SparsityParameters> sparsity_param =
        CreateSparsityParameters(builder, builder.CreateVector(traversal_order),
                                 0, builder.CreateVector(dim_metadata));
    tensors.emplace_back(CreateTensor(
        builder,
        builder.CreateVector<int32_t>(SlopeShape().data(), SlopeShape().size()),
        TensorType_FLOAT32, /*buffer=*/1, /*name=*/0, /*quantization=*/0,
        /*is_variable=*/false, /*sparsity=*/sparsity_param));
  }
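  // Emit the DEQUANTIZE or DENSIFY operator that turns the static slope
  // (tensor 0) into the dense FP32 slope (tensor 2).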
  if (FP16Weights()) {
    const std::array<int32_t, 1> dequantize_inputs{{0}};
    const std::array<int32_t, 1> dequantize_outputs{{2}};
    operators.emplace_back(CreateOperator(
        builder, /*opcode_index=*/1,
        builder.CreateVector<int32_t>(dequantize_inputs.data(),
                                      dequantize_inputs.size()),
        builder.CreateVector<int32_t>(dequantize_outputs.data(),
                                      dequantize_outputs.size())));
  } else if (SparseWeights()) {
    const std::array<int32_t, 1> densify_inputs{{0}};
    const std::array<int32_t, 1> densify_outputs{{2}};
    operators.emplace_back(
        CreateOperator(builder, /*opcode_index=*/1,
                       builder.CreateVector<int32_t>(densify_inputs.data(),
                                                     densify_inputs.size()),
                       builder.CreateVector<int32_t>(densify_outputs.data(),
                                                     densify_outputs.size())));
  }
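  // Graph tensors: the input, the dense FP32 slope (backed by buffer 1, or
  // produced by the DEQUANTIZE/DENSIFY operator above), and the output.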
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(InputShape().data(), InputShape().size()),
      TensorType_FLOAT32));
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(SlopeShape().data(), SlopeShape().size()),
      TensorType_FLOAT32,
      /*buffer=*/(FP16Weights() || SparseWeights()) ? 0 : 1));
  tensors.emplace_back(CreateTensor(
      builder,
      builder.CreateVector<int32_t>(OutputShape().data(), OutputShape().size()),
      TensorType_FLOAT32));

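  // PRELU consumes the input (tensors.size() - 3) and the dense slope
  // (tensors.size() - 2) and produces the output (tensors.size() - 1).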
  const std::array<int32_t, 2> op_inputs{
      {static_cast<int>(tensors.size()) - 3,
       static_cast<int>(tensors.size()) - 2}};
  const std::array<int32_t, 1> op_outputs{
      {static_cast<int>(tensors.size()) - 1}};
  operators.emplace_back(CreateOperator(
      builder, /*opcode_index=*/0,
      builder.CreateVector<int32_t>(op_inputs.data(), op_inputs.size()),
      builder.CreateVector<int32_t>(op_outputs.data(), op_outputs.size())));

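  // Only the input and output are exposed by the subgraph; the slope is
  // treated as a constant weight.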
  const std::array<int32_t, 1> subgraph_inputs{
      {static_cast<int32_t>(tensors.size() - 3)}};
  const std::array<int32_t, 1> subgraph_outputs{
      {static_cast<int32_t>(tensors.size()) - 1}};
  flatbuffers::Offset<SubGraph> subgraph = CreateSubGraph(
      builder, builder.CreateVector(tensors.data(), tensors.size()),
      builder.CreateVector<int32_t>(subgraph_inputs.data(),
                                    subgraph_inputs.size()),
      builder.CreateVector<int32_t>(subgraph_outputs.data(),
                                    subgraph_outputs.size()),
      builder.CreateVector(operators.data(), operators.size()));

  flatbuffers::Offset<flatbuffers::String> description =
      builder.CreateString("PReLU model");

  flatbuffers::Offset<Model> model_buffer = CreateModel(
      builder, TFLITE_SCHEMA_VERSION,
      builder.CreateVector(operator_codes.data(), operator_codes.size()),
      builder.CreateVector(&subgraph, 1), description,
      builder.CreateVector(buffers.data(), buffers.size()));

  builder.Finish(model_buffer);

  return std::vector<char>(builder.GetBufferPointer(),
                           builder.GetBufferPointer() + builder.GetSize());
}

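// Returns the number of elements in a tensor with the given shape.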
int32_t PreluTester::ComputeSize(const std::vector<int32_t>& shape) {
  return std::accumulate(shape.cbegin(), shape.cend(), 1,
                         std::multiplies<int32_t>());
}

}  // namespace xnnpack
}  // namespace tflite