//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{
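// Builds a minimal TfLite flatbuffer model containing a single SOFTMAX or
// LOG_SOFTMAX operator with one input and one output tensor of the given
// shape and data type.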
std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
                                           tflite::TensorType tensorType,
                                           const std::vector<int32_t>& tensorShape,
                                           float beta)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

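    // TfLite convention: buffer 0 is the empty buffer, referenced by tensors
    // that carry no constant data.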
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));

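    // Input (tensor 0) and output (tensor 1) share the same shape and data type.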
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              0);

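    // The operator consumes tensor 0 and produces tensor 1.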
    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> softmaxOperator;
    flatbuffers::Offset<flatbuffers::String> modelDescription;
    flatbuffers::Offset<OperatorCode> operatorCode;

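    // Build the operator, model description and operator code for the requested variant.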
    switch (softmaxOperatorCode)
    {
        case tflite::BuiltinOperator_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_SoftmaxOptions,
                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_SOFTMAX);
            break;
        case tflite::BuiltinOperator_LOG_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_LogSoftmaxOptions,
                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_LOG_SOFTMAX);
            break;
        default:
            break;
    }
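    // Wrap the tensors and the single operator into a subgraph, then into a model.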
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
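    // Serialise the model and copy the finished flatbuffer out of the builder.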
    flatBufferBuilder.Finish(flatbufferModel);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

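// Runs the generated softmax model through a plain TfLite interpreter and through an
// interpreter that uses the ArmNN delegate, then checks both outputs against the
// expected values and against each other.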
void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
                 tflite::TensorType tensorType,
                 std::vector<armnn::BackendId>& backends,
                 std::vector<int32_t>& shape,
                 std::vector<float>& inputValues,
                 std::vector<float>& expectedOutputValues,
                 float beta = 0)
{
    using namespace tflite;
    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
                                                             tensorType,
                                                             shape,
                                                             beta);

    const Model* tfLiteModel = GetModel(modelBuffer.data());
    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
                  (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteInterpreterInputData[i] = inputValues[i];
    }

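    // Copy the same input values into the delegate interpreter's input tensor.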
    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }
    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);

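    // The delegate output must match both the expected values and the reference
    // TfLite output within the given percentage tolerance (0.1).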
    for (size_t i = 0; i < inputValues.size(); ++i)
    {
        CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1));
        CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
                                                      armnnDelegateOutputData[i], 0.1));
    }
}

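// Example usage (a sketch; the backend list, shape and values below are illustrative
// assumptions, not taken from this header):
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> shape { 1, 4 };
//     std::vector<float> input { 1.f, 2.f, 3.f, 4.f };
//     std::vector<float> expected { /* reference softmax of 'input' with beta = 1 */ };
//     SoftmaxTest(tflite::BuiltinOperator_SOFTMAX, tflite::TensorType_FLOAT32,
//                 backends, shape, input, expected, 1.0f);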
} // anonymous namespace