//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{
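// Builds a FlatBuffer TfLite model containing a single TRANSPOSE operator.
// The model has two inputs (the data tensor and a constant INT32 permutation
// vector) and one output, and is returned as a raw byte vector.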
std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
                                             const std::vector<int32_t>& input0TensorShape,
                                             const std::vector<int32_t>& inputPermVecShape,
                                             const std::vector<int32_t>& outputTensorShape,
                                             const std::vector<int32_t>& inputPermVec)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // Buffer 0 is the conventional empty buffer; buffer 1 holds the raw bytes
    // of the permutation vector so tensor 1 can reference them as constant data.
    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
    buffers[1] = CreateBuffer(flatBufferBuilder,
                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
                                                             sizeof(int32_t) * inputPermVec.size()));
    // Tensor 0: the input data; tensor 1: the INT32 permutation vector (backed
    // by buffer 1); tensor 2: the output.
    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                      input0TensorShape.size()),
                              tensorType, 0);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
                                                                      inputPermVecShape.size()),
                              tflite::TensorType_INT32, 1,
                              flatBufferBuilder.CreateString("permutation_vector"));
    tensors[2] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType);
    // A single TRANSPOSE operator consuming tensors 0 and 1 and producing tensor 2.
    const std::vector<int32_t> operatorInputs{ 0, 1 };
    const std::vector<int32_t> operatorOutputs{ 2 };
    flatbuffers::Offset<Operator> transposeOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       BuiltinOptions_TransposeOptions,
                       CreateTransposeOptions(flatBufferBuilder).Union());

    const std::vector<int> subgraphInputs{ 0, 1 };
    const std::vector<int> subgraphOutputs{ 2 };
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&transposeOperator, 1));
    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        tflite::BuiltinOperator_TRANSPOSE);
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

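// Runs the model above through two TfLite interpreters, one executing on the
// reference TfLite CPU kernels and one with the graph delegated to Arm NN via
// the given backends, and checks both against the hard-coded expected output.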
void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
{
    using namespace tflite;

    // Set test input data: transposing a { 4, 2, 3 } tensor with the
    // permutation vector { 2, 0, 1 } yields a { 3, 4, 2 } tensor.
    std::vector<int32_t> input0Shape {4, 2, 3};
    std::vector<int32_t> inputPermVecShape {3};
    std::vector<int32_t> outputShape {3, 4, 2};

    std::vector<float> input0Values = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
    std::vector<int32_t> inputPermVec = {2, 0, 1};
    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
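    // For perm = {2, 0, 1}, output[a][b][c] == input[b][c][a], so the first
    // output row {0, 3, 6, 9, ...} walks the flattened input in strides of 3.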

    // Create the model.
    std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
                                                               input0Shape,
                                                               inputPermVecShape,
                                                               outputShape,
                                                               inputPermVec);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create two TfLite interpreters from the same model: one to be accelerated
    // by the Arm NN delegate and one to run on the built-in TfLite kernels.
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the Arm NN delegate, owned by a unique_ptr with the matching
    // TfLiteArmnnDelegateDelete function as its deleter.
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Hand the graph of armnnDelegateInterpreter over to the Arm NN delegate.
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data for the TfLite interpreter.
    auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
    auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
    for (unsigned int i = 0; i < input0Values.size(); ++i)
    {
        tfLiteInterpreterInput0Data[i] = input0Values[i];
    }

    auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
    auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
    {
        tfLiteInterpreterInput1Data[i] = inputPermVec[i];
    }

    // Set input data for the Arm NN delegate interpreter.
    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
    for (unsigned int i = 0; i < input0Values.size(); ++i)
    {
        armnnDelegateInput0Data[i] = input0Values[i];
    }

    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
    {
        armnnDelegateInput1Data[i] = inputPermVec[i];
    }

    // Run inference on both interpreters.
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare the output of both interpreters against the expected values.
    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
    {
        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
        CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
        CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
    }

    // Release the delegated interpreter before the delegate itself goes out of scope.
    armnnDelegateInterpreter.reset(nullptr);
}
} // anonymous namespace
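
// A minimal usage sketch: a doctest case in a separate .cpp that includes this
// header might drive the helper like this (the suite/case names and the CpuRef
// backend choice are illustrative assumptions, not part of this header):
//
//     TEST_SUITE("TransposeTests")
//     {
//         TEST_CASE("Transpose_Float32_Test")
//         {
//             std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//             TransposeFP32Test(backends);
//         }
//     }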