// NOTE(review): code-browser navigation chrome ("Home / Line# / Scopes# /
// Navigate# / Raw / Download") removed — it is not part of the header.
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
#define TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_

#include <string>

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

24 namespace tflite {
25 namespace nnapi {
26 
27 // Offers an interface to alter the behaviour of the NNAPI instance.
28 // As for NNAPI, it is designed to be a singleton.
29 // It allows to change the behaviour of some of the methods with some stub
30 // implementation and then to reset the behavior to the original one using
31 // Reset().
32 //
33 class NnApiHandler {
34  public:
35   // No destructor defined to allow this class to be used as singleton.
36 
37   // Factory method, only one instance per process/jni library.
38   static NnApiHandler* Instance();
39 
40   // Makes the current object a transparent proxy again, resetting any
41   // applied changes to its methods.
42   void Reset();
43 
44   // Using templates in the ...Returns methods because the functions need to be
45   // stateless and the template generated code is more readable than using a
46   // file-local variable in the method implementation to store the configured
47   // result.
48 
49   template <int Value>
GetDeviceCountReturns()50   void GetDeviceCountReturns() {
51     nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
52       *numDevices = 1;
53       return Value;
54     };
55   }
56 
57   template <int DeviceCount>
GetDeviceCountReturnsCount()58   void GetDeviceCountReturnsCount() {
59     nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
60       *numDevices = DeviceCount;
61       return ANEURALNETWORKS_NO_ERROR;
62     };
63   }
64 
StubGetDeviceCountWith(int (stub)(uint32_t *))65   void StubGetDeviceCountWith(int(stub)(uint32_t*)) {
66     nnapi_->ANeuralNetworks_getDeviceCount = stub;
67   }
68 
69   template <int Value>
GetDeviceReturns()70   void GetDeviceReturns() {
71     nnapi_->ANeuralNetworks_getDevice =
72         [](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
73       *device =
74           reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
75       return Value;
76     };
77   }
78 
StubGetDeviceWith(int (stub)(uint32_t,ANeuralNetworksDevice **))79   void StubGetDeviceWith(int(stub)(uint32_t, ANeuralNetworksDevice**)) {
80     nnapi_->ANeuralNetworks_getDevice = stub;
81   }
82 
83   template <int Value>
GetDeviceNameReturns()84   void GetDeviceNameReturns() {
85     nnapi_->ANeuralNetworksDevice_getName =
86         [](const ANeuralNetworksDevice* device, const char** name) -> int {
87       *name = NnApiHandler::nnapi_device_name_;
88       return Value;
89     };
90   }
91 
92   void GetDeviceNameReturnsName(const std::string& name);
93 
StubGetDeviceNameWith(int (stub)(const ANeuralNetworksDevice *,const char **))94   void StubGetDeviceNameWith(int(stub)(const ANeuralNetworksDevice*,
95                                        const char**)) {
96     nnapi_->ANeuralNetworksDevice_getName = stub;
97   }
98 
99   // Configure all the functions related to device browsing to support
100   // a device with the given name and the cpu fallback nnapi-reference.
101   // The extra device will return support the specified feature level
102   void SetNnapiSupportedDevice(const std::string& name, int feature_level = 29);
103 
104   template <int Value>
ModelCreateReturns()105   void ModelCreateReturns() {
106     nnapi_->ANeuralNetworksModel_create = [](ANeuralNetworksModel** model) {
107       *model = reinterpret_cast<ANeuralNetworksModel*>(1);
108       return Value;
109     };
110   }
111 
StubModelCreateWith(int (stub)(ANeuralNetworksModel ** model))112   void StubModelCreateWith(int(stub)(ANeuralNetworksModel** model)) {
113     nnapi_->ANeuralNetworksModel_create = stub;
114   }
115 
116   template <int Value>
AddOperandReturns()117   void AddOperandReturns() {
118     nnapi_->ANeuralNetworksModel_addOperand =
119         [](ANeuralNetworksModel* model,
120            const ANeuralNetworksOperandType* type) { return Value; };
121   }
122 
StubAddOperandWith(int (stub)(ANeuralNetworksModel * model,const ANeuralNetworksOperandType * type))123   void StubAddOperandWith(int(stub)(ANeuralNetworksModel* model,
124                                     const ANeuralNetworksOperandType* type)) {
125     nnapi_->ANeuralNetworksModel_addOperand = stub;
126   }
127 
128   template <int Value>
SetOperandValueReturns()129   void SetOperandValueReturns() {
130     nnapi_->ANeuralNetworksModel_setOperandValue =
131         [](ANeuralNetworksModel* model, int32_t index, const void* buffer,
132            size_t length) { return Value; };
133   }
134 
135   template <int Value>
AddOperationReturns()136   void AddOperationReturns() {
137     nnapi_->ANeuralNetworksModel_addOperation =
138         [](ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
139            uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
140            const uint32_t* outputs) { return Value; };
141   }
142 
StubAddOperationWith(int (stub)(ANeuralNetworksModel * model,ANeuralNetworksOperationType type,uint32_t inputCount,const uint32_t * inputs,uint32_t outputCount,const uint32_t * outputs))143   void StubAddOperationWith(
144       int(stub)(ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
145                 uint32_t inputCount, const uint32_t* inputs,
146                 uint32_t outputCount, const uint32_t* outputs)) {
147     nnapi_->ANeuralNetworksModel_addOperation = stub;
148   }
149 
150   template <int Value>
IdentifyInputAndOutputsReturns()151   void IdentifyInputAndOutputsReturns() {
152     nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs =
153         [](ANeuralNetworksModel* model, uint32_t inputCount,
154            const uint32_t* inputs, uint32_t outputCount,
155            const uint32_t* outputs) { return Value; };
156   }
157 
158   template <int Value>
RelaxComputationFloatReturns()159   void RelaxComputationFloatReturns() {
160     nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 =
161         [](ANeuralNetworksModel* model, bool allow) { return Value; };
162   }
163 
164   template <int Value>
ModelFinishReturns()165   void ModelFinishReturns() {
166     nnapi_->ANeuralNetworksModel_finish = [](ANeuralNetworksModel* model) {
167       return Value;
168     };
169   }
170 
171   template <int Value>
MemoryCreateFromFdReturns()172   void MemoryCreateFromFdReturns() {
173     nnapi_->ANeuralNetworksMemory_createFromFd =
174         [](size_t size, int protect, int fd, size_t offset,
175            ANeuralNetworksMemory** memory) {
176           *memory = reinterpret_cast<ANeuralNetworksMemory*>(2);
177           return Value;
178         };
179   }
180 
181   template <int Value>
CompilationCreateReturns()182   void CompilationCreateReturns() {
183     nnapi_->ANeuralNetworksCompilation_create =
184         [](ANeuralNetworksModel* model,
185            ANeuralNetworksCompilation** compilation) {
186           *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
187           return Value;
188         };
189   }
190 
191   template <int Value>
CompilationCreateForDevicesReturns()192   void CompilationCreateForDevicesReturns() {
193     nnapi_->ANeuralNetworksCompilation_createForDevices =
194         [](ANeuralNetworksModel* model,
195            const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
196            ANeuralNetworksCompilation** compilation) {
197           *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
198           return Value;
199         };
200   }
201 
StubCompilationCreateForDevicesWith(int (stub)(ANeuralNetworksModel * model,const ANeuralNetworksDevice * const * devices,uint32_t numDevices,ANeuralNetworksCompilation ** compilation))202   void StubCompilationCreateForDevicesWith(int(stub)(
203       ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
204       uint32_t numDevices, ANeuralNetworksCompilation** compilation)) {
205     nnapi_->ANeuralNetworksCompilation_createForDevices = stub;
206   }
207 
208   template <int Value>
CompilationFinishReturns()209   void CompilationFinishReturns() {
210     nnapi_->ANeuralNetworksCompilation_finish =
211         [](ANeuralNetworksCompilation* compilation) { return Value; };
212   }
213 
214   template <int Value>
ExecutionCreateReturns()215   void ExecutionCreateReturns() {
216     nnapi_->ANeuralNetworksExecution_create =
217         [](ANeuralNetworksCompilation* compilation,
218            ANeuralNetworksExecution** execution) {
219           if (compilation == nullptr) return 1;
220           *execution = reinterpret_cast<ANeuralNetworksExecution*>(4);
221           return Value;
222         };
223   }
224   template <int Value>
ExecutionSetInputFromMemoryReturns()225   void ExecutionSetInputFromMemoryReturns() {
226     nnapi_->ANeuralNetworksExecution_setInputFromMemory =
227         [](ANeuralNetworksExecution* execution, int32_t index,
228            const ANeuralNetworksOperandType* type,
229            const ANeuralNetworksMemory* memory, size_t offset,
230            size_t length) { return Value; };
231   }
232   template <int Value>
ExecutionSetOutputFromMemoryReturns()233   void ExecutionSetOutputFromMemoryReturns() {
234     nnapi_->ANeuralNetworksExecution_setOutputFromMemory =
235         [](ANeuralNetworksExecution* execution, int32_t index,
236            const ANeuralNetworksOperandType* type,
237            const ANeuralNetworksMemory* memory, size_t offset,
238            size_t length) { return Value; };
239   }
240 
241   template <int Value>
ExecutionComputeReturns()242   void ExecutionComputeReturns() {
243     nnapi_->ANeuralNetworksExecution_compute =
244         [](ANeuralNetworksExecution* execution) { return Value; };
245   }
246 
247   template <int Value>
GetSupportedOperationsForDevicesReturns()248   void GetSupportedOperationsForDevicesReturns() {
249     nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
250         [](const ANeuralNetworksModel* model,
251            const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
252            bool* supportedOps) { return Value; };
253   }
254 
StubGetSupportedOperationsForDevicesWith(int (stub)(const ANeuralNetworksModel * model,const ANeuralNetworksDevice * const * devices,uint32_t numDevices,bool * supportedOps))255   void StubGetSupportedOperationsForDevicesWith(
256       int(stub)(const ANeuralNetworksModel* model,
257                 const ANeuralNetworksDevice* const* devices,
258                 uint32_t numDevices, bool* supportedOps)) {
259     nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = stub;
260   }
261 
262   template <int Value>
ExecutionStartComputeReturns()263   void ExecutionStartComputeReturns() {
264     nnapi_->ANeuralNetworksExecution_startCompute =
265         [](ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
266           *event = reinterpret_cast<ANeuralNetworksEvent*>(1);
267           return Value;
268         };
269   }
270 
271   template <int Value>
EventWaitReturns()272   void EventWaitReturns() {
273     nnapi_->ANeuralNetworksEvent_wait = [](ANeuralNetworksEvent* event) {
274       return Value;
275     };
276   }
277 
278   template <int Value>
SetPriorityReturns()279   void SetPriorityReturns() {
280     nnapi_->ANeuralNetworksCompilation_setPriority =
281         [](ANeuralNetworksCompilation* compilation, int priority) -> int {
282       return Value;
283     };
284   }
285 
286   template <int Value>
SetOperandSymmPerChannelQuantParamsReturns()287   void SetOperandSymmPerChannelQuantParamsReturns() {
288     nnapi_->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams =
289         [](ANeuralNetworksModel* model, int32_t index,
290            const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) {
291           return Value;
292         };
293   }
294 
295   /*
296    * Sets the SDK Version in the nnapi structure.
297    * If set_unsupported_ops_to_null is set to true, all the functions not
298    * available at the given sdk level will be set to null too.
299    */
300   void SetAndroidSdkVersion(int version,
301                             bool set_unsupported_ops_to_null = false);
302 
GetNnApi()303   const NnApi* GetNnApi() { return nnapi_; }
304 
305  protected:
NnApiHandler(NnApi * nnapi)306   explicit NnApiHandler(NnApi* nnapi) : nnapi_(nnapi) { DCHECK(nnapi); }
307 
308   NnApi* nnapi_;
309 
310   static const char kNnapiReferenceDeviceName[];
311   static const int kNnapiReferenceDevice;
312   static const int kNnapiDevice;
313 
314   static void SetDeviceName(const std::string& name);
315 
316  private:
317   static char* nnapi_device_name_;
318   static int nnapi_device_feature_level_;
319 };
320 
321 // Returns a pointer to an unaltered instance of NNAPI. Is intended
322 // to be used by stub methods when wanting to pass-through to original
323 // implementation for example:
324 //
325 // NnApiTestUtility()->StubGetDeviceWith(
326 //  [](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
327 //        static int count = 0;
328 //        if (count++ < 1) {
329 //          NnApiPassthroughInstance()->ANeuralNetworks_getDevice(
330 //                devIndex, device);
331 //        } else {
332 //            return ANEURALNETWORKS_BAD_DATA;
333 //        }
334 //   });
335 const NnApi* NnApiPassthroughInstance();
336 
337 // Returns an instance of NnApiProxy that can be used to alter
338 // the behaviour of the TFLite wide instance of NnApi.
339 NnApiHandler* NnApiProxyInstance();
340 
341 }  // namespace nnapi
342 }  // namespace tflite

#endif  // TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_