//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace
{

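// Serializes a minimal TfLite model containing a single activation operator
// (for example RELU or TANH) into an in-memory flatbuffer: one input tensor
// and one output tensor, both of the given shape and type.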
std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
                                              tflite::TensorType tensorType,
                                              const std::vector<int32_t>& tensorShape)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

    // A single empty buffer; neither tensor carries constant data.
    // (CreateBuffer's data argument defaults to a null offset, so no vector is
    // needed; the original CreateVector({}) relied on template deduction from
    // an empty braced list, which some toolchains reject.)
    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
    buffers[0] = CreateBuffer(flatBufferBuilder);

    // Two tensors of the same shape and type: index 0 is the operator's
    // input, index 1 its output.
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
                              tensorType);

    // Create the operator: opcode index 0 into the operator-code table below,
    // reading tensor 0 and writing tensor 1
    const std::vector<int32_t> operatorInputs{0};
    const std::vector<int32_t> operatorOutputs{1};
    flatbuffers::Offset<Operator> unaryOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));

    const std::vector<int32_t> subgraphInputs{0};
    const std::vector<int32_t> subgraphOutputs{1};
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&unaryOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, activationOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
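
// The returned bytes form a complete flatbuffer. Callers can sanity-check the
// buffer with the generated schema verifier before handing it to an
// interpreter; a minimal sketch:
//
//     std::vector<char> modelBuffer =
//         CreateActivationTfLiteModel(tflite::BuiltinOperator_RELU,
//                                     tflite::TensorType_FLOAT32, { 4, 1, 4 });
//     flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
//                                    modelBuffer.size());
//     CHECK(tflite::VerifyModelBuffer(verifier));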

// Runs the given activation model on two TfLite interpreters, one with the
// ArmNN delegate attached and one using the built-in reference kernels, then
// checks the delegate output against both the expected values and the
// reference output.
void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
                    std::vector<armnn::BackendId>& backends,
                    std::vector<float>& inputValues,
                    std::vector<float>& expectedOutputValues)
{
    using namespace tflite;
    const std::vector<int32_t> inputShape { 4, 1, 4 };
    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
                                                                ::tflite::TensorType_FLOAT32,
                                                                inputShape);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create two TfLite interpreters: one that will run through the ArmNN
    // delegate and one that runs the reference TfLite kernels
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);
    // Modify armnnDelegateInterpreter to use the ArmNN delegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set the same input data on both interpreters
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }
    // Run inference; the ArmNN delegate dispatches this to EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data: the ArmNN delegate output must match both the
    // expected values and the reference TfLite output
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
    {
        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
        CHECK(tfLiteDelegateOutputData[i] == doctest::Approx(armnnDelegateOutputData[i]));
    }
}
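
// Example usage, a sketch only: the concrete TEST_CASEs live in the
// per-operator test files, and which backends are available depends on how
// ArmNN was built (the test name below is hypothetical).
//
//     TEST_CASE ("Activation_ReLu_CpuRef_Test")
//     {
//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//         // 4 x 1 x 4 = 16 values, matching the shape hard-coded above
//         std::vector<float> inputValues         = { -0.1f, -2.0f,  0.0f,  1.0f,
//                                                     2.0f,  3.0f,  4.0f,  5.0f,
//                                                    -1.0f,  6.0f,  7.0f,  8.0f,
//                                                     9.0f, 10.0f, 11.0f, 12.0f };
//         // ReLU clamps negative inputs to zero
//         std::vector<float> expectedOutputValues = {  0.0f,  0.0f,  0.0f,  1.0f,
//                                                      2.0f,  3.0f,  4.0f,  5.0f,
//                                                      0.0f,  6.0f,  7.0f,  8.0f,
//                                                      9.0f, 10.0f, 11.0f, 12.0f };
//         ActivationTest(tflite::BuiltinOperator_RELU, backends,
//                        inputValues, expectedOutputValues);
//     }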

} // anonymous namespace