//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <string>

namespace
{

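// Builds, in memory, a TfLite model containing a single concatenation-style operator
// ('controlOperatorCode' with ConcatenationOptions) that joins 'inputTensorNum' inputs
// of identical shape along 'axis', and returns the serialised FlatBuffer contents.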
std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
                                          tflite::TensorType tensorType,
                                          std::vector<int32_t>& inputTensorShape,
                                          const std::vector<int32_t>& outputTensorShape,
                                          const int32_t inputTensorNum,
                                          int32_t axis = 0,
                                          float quantScale = 1.0f,
                                          int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

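    // Buffer 0 is the empty sentinel buffer that the TfLite schema reserves for
    // tensors without constant data; every tensor in this model references it.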
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    // Tensor indices: the inputs occupy 0 .. inputTensorNum - 1, the output sits at inputTensorNum.
    std::vector<int32_t> operatorInputs{};
    const std::vector<int32_t> operatorOutputs{inputTensorNum};
    std::vector<int> subgraphInputs{};
    const std::vector<int> subgraphOutputs{inputTensorNum};

    std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
    for (int i = 0; i < inputTensorNum; ++i)
    {
        tensors[i] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                          inputTensorShape.size()),
                                  tensorType,
                                  0,
                                  flatBufferBuilder.CreateString("input" + std::to_string(i)),
                                  quantizationParameters);

        // Register tensor i as an input of both the operator and the subgraph.
        operatorInputs.push_back(i);
        subgraphInputs.push_back(i);
    }

    // Create output tensor
    tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ConcatenationOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConcatenationOptions(flatBufferBuilder, axis).Union();

    flatbuffers::Offset<Operator> controlOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Concatenation Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

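// Builds, in memory, a TfLite model containing a single reducer-style operator
// ('controlOperatorCode' with ReducerOptions, e.g. MEAN) whose reduction axes are a
// constant INT32 tensor, and returns the serialised FlatBuffer contents.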
std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
                                        tflite::TensorType tensorType,
                                        std::vector<int32_t>& input0TensorShape,
                                        std::vector<int32_t>& input1TensorShape,
                                        const std::vector<int32_t>& outputTensorShape,
                                        std::vector<int32_t>& axisData,
                                        const bool keepDims,
                                        float quantScale = 1.0f,
                                        int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

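    // Two buffers: index 0 is the empty sentinel and index 1 holds the constant
    // axis data that the "axis" tensor below references.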
    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                             sizeof(int32_t) * axisData.size()));

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                      input0TensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input"),
                              quantizationParameters);

    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                      input1TensorShape.size()),
                              ::tflite::TensorType_INT32,
                              1,
                              flatBufferBuilder.CreateString("axis"),
                              quantizationParameters);

    // Create output tensor
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create operator. Mean uses ReducerOptions.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();

    const std::vector<int> operatorInputs{ {0, 1} };
    const std::vector<int> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> controlOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    const std::vector<int> subgraphInputs{ {0, 1} };
    const std::vector<int> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&controlOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Mean Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

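// Runs the serialised model through a plain TfLite interpreter and through one whose
// graph is delegated to ArmNN on the given backends, then CHECKs that both interpreters
// produce 'expectedOutputValues' with shape 'expectedOutputShape'.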
template <typename T>
void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
                       tflite::TensorType tensorType,
                       std::vector<armnn::BackendId>& backends,
                       std::vector<int32_t>& inputShapes,
                       std::vector<int32_t>& expectedOutputShape,
                       std::vector<std::vector<T>>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       int32_t axis = 0,
                       float quantScale = 1.0f,
                       int quantOffset  = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
                                                            tensorType,
                                                            inputShapes,
                                                            expectedOutputShape,
                                                            inputValues.size(),
                                                            axis,
                                                            quantScale,
                                                            quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data for all input tensors.
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        // Copy the i-th set of input values into both interpreters.
        auto inputTensorValues = inputValues[i];
        armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
    }

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                        armnnDelegateInterpreter,
                                        expectedOutputShape,
                                        expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

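// As ConcatenationTest, but for the reducer model: 'input1Values' supplies the constant
// axis data that is baked into the model, so only 'input0Values' is fed in at runtime.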
template <typename T>
void MeanTest(tflite::BuiltinOperator controlOperatorCode,
              tflite::TensorType tensorType,
              std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& input0Shape,
              std::vector<int32_t>& input1Shape,
              std::vector<int32_t>& expectedOutputShape,
              std::vector<T>& input0Values,
              std::vector<int32_t>& input1Values,
              std::vector<T>& expectedOutputValues,
              const bool keepDims,
              float quantScale = 1.0f,
              int quantOffset  = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
                                                          tensorType,
                                                          input0Shape,
                                                          input1Shape,
                                                          expectedOutputShape,
                                                          input1Values,
                                                          keepDims,
                                                          quantScale,
                                                          quantOffset);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
                                        armnnDelegateInterpreter,
                                        expectedOutputShape,
                                        expectedOutputValues);

    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace
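
// A minimal usage sketch (not part of the original helpers): a doctest case driving
// ConcatenationTest for FP32 concatenation on CpuRef. The shapes and values are
// hypothetical, chosen only to illustrate the calling convention; MeanTest is driven
// the same way with an extra axis shape/value pair and 'keepDims'.
//
// TEST_CASE ("Concatenation_FP32_CpuRef_Test")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//
//     // Two {2, 2} inputs joined along axis 0 yield a {4, 2} output.
//     std::vector<int32_t> inputShape          { 2, 2 };
//     std::vector<int32_t> expectedOutputShape { 4, 2 };
//     std::vector<std::vector<float>> inputValues = { { 0.f, 1.f, 2.f, 3.f },
//                                                     { 4.f, 5.f, 6.f, 7.f } };
//     std::vector<float> expectedOutputValues = { 0.f, 1.f, 2.f, 3.f,
//                                                 4.f, 5.f, 6.f, 7.f };
//
//     ConcatenationTest<float>(tflite::BuiltinOperator_CONCATENATION,
//                              ::tflite::TensorType_FLOAT32,
//                              backends,
//                              inputShape,
//                              expectedOutputShape,
//                              inputValues,
//                              expectedOutputValues,
//                              0); // axis
// }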