//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

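// Builds a self-contained, single-operator TfLite model in memory: two input
// tensors and one output tensor wired to one elementwise binary operator
// (ADD, DIV, MAXIMUM, MINIMUM, MUL or SUB). quantScale/quantOffset are only
// meaningful for quantized tensor types; the defaults leave float models
// unaffected.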
std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator binaryOperatorCode,
                                                     tflite::ActivationFunctionType activationType,
                                                     tflite::TensorType tensorType,
                                                     const std::vector<int32_t>& input0TensorShape,
                                                     const std::vector<int32_t>& input1TensorShape,
                                                     const std::vector<int32_t>& outputTensorShape,
                                                     float quantScale = 1.0f,
                                                     int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

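    // Buffer 0 is conventionally the empty sentinel buffer in a TfLite model;
    // no tensor in this test carries constant data, so it is the only buffer
    // required.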
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

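    // All three tensors share the same quantization parameters; the min/max
    // fields are omitted (null offsets) and only scale and zero point are set.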
    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

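    // Describe the two inputs and the output; all reference the empty buffer
    // (index 0) since their data is supplied at runtime.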
    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                      input0TensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input_0"),
                              quantizationParameters);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                      input1TensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("input_1"),
                              quantizationParameters);
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
    switch (binaryOperatorCode)
    {
        case BuiltinOperator_ADD:
        {
            operatorBuiltinOptionsType = BuiltinOptions_AddOptions;
            operatorBuiltinOptions = CreateAddOptions(flatBufferBuilder, activationType).Union();
            break;
        }
        case BuiltinOperator_DIV:
        {
            operatorBuiltinOptionsType = BuiltinOptions_DivOptions;
            operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
            break;
        }
        case BuiltinOperator_MAXIMUM:
        {
            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
            break;
        }
        case BuiltinOperator_MINIMUM:
        {
            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
            break;
        }
        case BuiltinOperator_MUL:
        {
            operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
            operatorBuiltinOptions = CreateMulOptions(flatBufferBuilder, activationType).Union();
            break;
        }
        case BuiltinOperator_SUB:
        {
            operatorBuiltinOptionsType = BuiltinOptions_SubOptions;
            operatorBuiltinOptions = CreateSubOptions(flatBufferBuilder, activationType).Union();
            break;
        }
        default:
            break;
    }
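    // Tensor indices within the subgraph: inputs are tensors 0 and 1, the
    // output is tensor 2.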
    const std::vector<int32_t> operatorInputs{ 0, 1 };
    const std::vector<int32_t> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> elementwiseBinaryOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

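    // The subgraph owns the tensors and the single operator; its inputs and
    // outputs mirror the operator's.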
    const std::vector<int32_t> subgraphInputs{ 0, 1 };
    const std::vector<int32_t> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&elementwiseBinaryOperator, 1));

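    // Assemble the top-level Model table from the operator code, subgraph,
    // description and buffers.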
    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Binary Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, binaryOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

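    // Finalise the buffer and copy it out so the caller owns the bytes.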
    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

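// Builds the model above, runs it through two interpreters (one with the
// Arm NN delegate applied, one on the reference TfLite CPU kernels) and
// checks that both produce the expected output values.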
template <typename T>
void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
                           tflite::ActivationFunctionType activationType,
                           tflite::TensorType tensorType,
                           std::vector<armnn::BackendId>& backends,
                           std::vector<int32_t>& input0Shape,
                           std::vector<int32_t>& input1Shape,
                           std::vector<int32_t>& outputShape,
                           std::vector<T>& input0Values,
                           std::vector<T>& input1Values,
                           std::vector<T>& expectedOutputValues,
                           float quantScale = 1.0f,
                           int quantOffset  = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode,
                                                                       activationType,
                                                                       tensorType,
                                                                       input0Shape,
                                                                       input1Shape,
                                                                       outputShape,
                                                                       quantScale,
                                                                       quantOffset);

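    // Parse the freshly built flatbuffer back into a tflite::Model.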
    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create two TfLite interpreters: one that will run with the Arm NN
    // delegate and one that runs the reference CPU kernels for comparison
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set the same input data on both interpreters
    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
    for (unsigned int i = 0; i < input0Values.size(); ++i)
    {
        tfLiteDelegateInput0Data[i] = input0Values[i];
    }

    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
    auto tfLiteDelegateInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
    for (unsigned int i = 0; i < input1Values.size(); ++i)
    {
        tfLiteDelegateInput1Data[i] = input1Values[i];
    }

    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
    for (unsigned int i = 0; i < input0Values.size(); ++i)
    {
        armnnDelegateInput0Data[i] = input0Values[i];
    }

    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
    for (unsigned int i = 0; i < input1Values.size(); ++i)
    {
        armnnDelegateInput1Data[i] = input1Values[i];
    }

    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); i++)
    {
        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
    }

    armnnDelegateInterpreter.reset(nullptr);
}

} // anonymous namespace
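
// A minimal usage sketch, for illustration only: the real test cases live in
// the delegate's test .cpp files, and the backend chosen here (CpuRef) is an
// assumption. Shown commented out so this header does not register tests.
//
// TEST_CASE("ElementwiseBinary_Add_Float32_Sketch")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> shape          { 1, 2, 2, 1 };
//     std::vector<float>   input0Values   { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float>   input1Values   { 4.f, 3.f, 2.f, 1.f };
//     std::vector<float>   expectedValues { 5.f, 5.f, 5.f, 5.f };
//     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
//                                  tflite::ActivationFunctionType_NONE,
//                                  tflite::TensorType_FLOAT32,
//                                  backends,
//                                  shape, shape, shape,
//                                  input0Values, input1Values, expectedValues);
// }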