//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateTestInterpreterUtils.hpp>

#include <armnn_delegate.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Exceptions.hpp>

#include <tensorflow/lite/core/c/c_api.h>
#include <tensorflow/lite/kernels/kernel_util.h>
#include <tensorflow/lite/kernels/custom_ops_register.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/c/c_api_internal.h>

namespace delegateTestInterpreter
{

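/// Convenience wrapper around the TfLite C API used by the delegate unit tests.
///
/// Typical call sequence (an illustrative sketch, not taken from a real test;
/// ReadModelFile() is a hypothetical helper and the backend choice is only an example):
///
///     std::vector<char> modelBuffer = ReadModelFile("model.tflite");
///     DelegateTestInterpreter interpreter(modelBuffer, { armnn::Compute::CpuRef });
///     interpreter.AllocateTensors();
///
///     std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
///     interpreter.FillInputTensor<float>(inputValues, 0);
///
///     interpreter.Invoke();
///     std::vector<float> outputValues = interpreter.GetOutputResult<float>(0);
///     interpreter.Cleanup();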
class DelegateTestInterpreter
{
public:
    /// Create the TfLite Interpreter only, without applying a delegate.
    DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
    {
        TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);

        TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
        if (!customOp.empty())
        {
            options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
        }

        m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
        m_TfLiteDelegate = nullptr;

        // The options and model can be deleted after the interpreter is created.
        TfLiteInterpreterOptionsDelete(options);
        TfLiteModelDelete(model);
    }

    /// Create Interpreter with the default Arm NN Classic/Opaque Delegate applied
    DelegateTestInterpreter(std::vector<char>& model,
                            const std::vector<armnn::BackendId>& backends,
                            const std::string& customOp = "",
                            bool disableFallback = true);

    /// Create Interpreter with the Arm NN Classic/Opaque Delegate applied using the given DelegateOptions
    DelegateTestInterpreter(std::vector<char>& model,
                            const armnnDelegate::DelegateOptions& delegateOptions,
                            const std::string& customOp = "");

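    /// For example, applying the delegate with explicit options (an illustrative
    /// sketch; assumes the CpuRef backend is registered with the Arm NN runtime):
    ///
    ///     armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
    ///     DelegateTestInterpreter interpreter(modelBuffer, delegateOptions);
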
    /// Allocate the TfLiteTensors within the graph.
    /// This must be called before FillInputTensor(values, index) and Invoke().
    TfLiteStatus AllocateTensors()
    {
        return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
    }

    /// Copy a buffer of values into an input tensor at a given index.
    template<typename T>
    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
    {
        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
        return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
    }

    /// Copy a buffer of boolean values into an input tensor at a given index.
    /// std::vector<bool> is a bit-packed specialization that does not expose a
    /// contiguous bool buffer, so the tensor data is written element by element.
    TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
    {
        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
        if (inputTensor->type != kTfLiteBool)
        {
            throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
        }

        // Make sure there are enough bytes allocated to copy into.
        if (inputTensor->bytes < inputValues.size() * sizeof(bool))
        {
            throw armnn::Exception("Input tensor has not been allocated to match the number of input values.");
        }

        for (unsigned int i = 0; i < inputValues.size(); ++i)
        {
            inputTensor->data.b[i] = inputValues[i];
        }

        return kTfLiteOk;
    }
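
    /// A plain call with a std::vector<bool> selects this non-template overload
    /// (a non-template function is preferred over a template instantiation when
    /// both match exactly), so the element-wise copy happens automatically.
    /// Illustrative only:
    ///
    ///     std::vector<bool> boolValues { true, false, true, true };
    ///     interpreter.FillInputTensor(boolValues, 0);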

    /// Run the interpreter on either the TfLite runtime or the Arm NN delegate.
    /// AllocateTensors() must be called before Invoke().
    TfLiteStatus Invoke()
    {
        return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
    }

    /// Return a buffer of values from the output tensor at a given index.
    /// This must be called after Invoke().
    template<typename T>
    std::vector<T> GetOutputResult(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);

        int64_t n = tflite::NumElements(outputTensor);
        std::vector<T> output;
        output.resize(n);

        TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
        if (status != kTfLiteOk)
        {
            throw armnn::Exception("An error occurred when copying the output buffer.");
        }

        return output;
    }
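
    /// For example, reading a float output in a test (an illustrative sketch;
    /// CHECK is a doctest assertion, which the delegate test suite is assumed to use):
    ///
    ///     std::vector<float> result = interpreter.GetOutputResult<float>(0);
    ///     CHECK(result == expectedOutputValues);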

    /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
    /// std::vector<bool> is a bit-packed specialization that does not expose a
    /// contiguous bool buffer, so the tensor data is read element by element.
    std::vector<bool> GetOutputResult(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
        if (outputTensor->type != kTfLiteBool)
        {
            throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
        }

        int64_t n = tflite::NumElements(outputTensor);
        std::vector<bool> output(n, false);

        for (unsigned int i = 0; i < output.size(); ++i)
        {
            output[i] = outputTensor->data.b[i];
        }
        return output;
    }
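
    /// Calling GetOutputResult without an explicit template argument selects this
    /// bool overload, since T cannot be deduced from the return type alone.
    /// Illustrative only:
    ///
    ///     std::vector<bool> boolResult = interpreter.GetOutputResult(0);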

    /// Return a buffer of dimensions from the output tensor at a given index.
    std::vector<int32_t> GetOutputShape(int index)
    {
        const TfLiteTensor* outputTensor =
                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
        int32_t numDims = TfLiteTensorNumDims(outputTensor);

        std::vector<int32_t> dims;
        dims.reserve(numDims);

        for (int32_t i = 0; i < numDims; ++i)
        {
            dims.push_back(TfLiteTensorDim(outputTensor, i));
        }
        return dims;
    }
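
    /// For example, verifying an output shape in a test (illustrative only;
    /// CHECK is a doctest assertion assumed to be available in the test suite):
    ///
    ///     std::vector<int32_t> shape = interpreter.GetOutputShape(0);
    ///     CHECK(shape == std::vector<int32_t>({ 1, 2, 2, 1 }));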

    /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
    void Cleanup();

private:
    TfLiteInterpreter* m_TfLiteInterpreter;

    /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
    void* m_TfLiteDelegate;
};

} // namespace delegateTestInterpreter