• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 #ifndef TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
16 #define TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
17 
18 #include "tensorflow/core/platform/logging.h"
19 #include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
20 #include "tensorflow/lite/nnapi/nnapi_implementation.h"
21 
22 namespace tflite {
23 namespace nnapi {
24 
25 // Offers an interface to alter the behaviour of the NNAPI instance.
26 // As for NNAPI, it is designed to be a singleton.
27 // It allows to change the behaviour of some of the methods with some stub
28 // implementation and then to reset the behavior to the original one using
29 // Reset().
30 //
31 class NnApiHandler {
32  public:
33   // No destructor defined to allow this class to be used as singleton.
34 
35   // Factory method, only one instance per process/jni library.
36   static NnApiHandler* Instance();
37 
38   // Makes the current object a transparent proxy again, resetting any
39   // applied changes to its methods.
40   void Reset();
41 
42   // Using templates in the ...Returns methods because the functions need to be
43   // stateless and the template generated code is more readable than using a
44   // file-local variable in the method implementation to store the configured
45   // result.
46 
47   template <int Value>
GetDeviceCountReturns()48   void GetDeviceCountReturns() {
49     nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
50       *numDevices = 1;
51       return Value;
52     };
53   }
54 
55   template <int DeviceCount>
GetDeviceCountReturnsCount()56   void GetDeviceCountReturnsCount() {
57     nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
58       *numDevices = DeviceCount;
59       return ANEURALNETWORKS_NO_ERROR;
60     };
61   }
62 
StubGetDeviceCountWith(int (stub)(uint32_t *))63   void StubGetDeviceCountWith(int(stub)(uint32_t*)) {
64     nnapi_->ANeuralNetworks_getDeviceCount = stub;
65   }
66 
67   template <int Value>
GetDeviceReturns()68   void GetDeviceReturns() {
69     nnapi_->ANeuralNetworks_getDevice =
70         [](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
71       *device =
72           reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
73       return Value;
74     };
75   }
76 
StubGetDeviceWith(int (stub)(uint32_t,ANeuralNetworksDevice **))77   void StubGetDeviceWith(int(stub)(uint32_t, ANeuralNetworksDevice**)) {
78     nnapi_->ANeuralNetworks_getDevice = stub;
79   }
80 
81   template <int Value>
GetDeviceNameReturns()82   void GetDeviceNameReturns() {
83     nnapi_->ANeuralNetworksDevice_getName =
84         [](const ANeuralNetworksDevice* device, const char** name) -> int {
85       *name = NnApiHandler::nnapi_device_name_;
86       return Value;
87     };
88   }
89 
90   void GetDeviceNameReturnsName(const std::string& name);
91 
StubGetDeviceNameWith(int (stub)(const ANeuralNetworksDevice *,const char **))92   void StubGetDeviceNameWith(int(stub)(const ANeuralNetworksDevice*,
93                                        const char**)) {
94     nnapi_->ANeuralNetworksDevice_getName = stub;
95   }
96 
97   // Configure all the functions related to device browsing to support
98   // a device with the given name and the cpu fallback nnapi-reference.
99   // The extra device will return support the specified feature level
100   void SetNnapiSupportedDevice(const std::string& name, int feature_level = 29);
101 
102   template <int Value>
ModelCreateReturns()103   void ModelCreateReturns() {
104     nnapi_->ANeuralNetworksModel_create = [](ANeuralNetworksModel** model) {
105       *model = reinterpret_cast<ANeuralNetworksModel*>(1);
106       return Value;
107     };
108   }
109 
StubModelCreateWith(int (stub)(ANeuralNetworksModel ** model))110   void StubModelCreateWith(int(stub)(ANeuralNetworksModel** model)) {
111     nnapi_->ANeuralNetworksModel_create = stub;
112   }
113 
114   template <int Value>
AddOperandReturns()115   void AddOperandReturns() {
116     nnapi_->ANeuralNetworksModel_addOperand =
117         [](ANeuralNetworksModel* model,
118            const ANeuralNetworksOperandType* type) { return Value; };
119   }
120 
StubAddOperandWith(int (stub)(ANeuralNetworksModel * model,const ANeuralNetworksOperandType * type))121   void StubAddOperandWith(int(stub)(ANeuralNetworksModel* model,
122                                     const ANeuralNetworksOperandType* type)) {
123     nnapi_->ANeuralNetworksModel_addOperand = stub;
124   }
125 
126   template <int Value>
SetOperandValueReturns()127   void SetOperandValueReturns() {
128     nnapi_->ANeuralNetworksModel_setOperandValue =
129         [](ANeuralNetworksModel* model, int32_t index, const void* buffer,
130            size_t length) { return Value; };
131   }
132 
133   template <int Value>
AddOperationReturns()134   void AddOperationReturns() {
135     nnapi_->ANeuralNetworksModel_addOperation =
136         [](ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
137            uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
138            const uint32_t* outputs) { return Value; };
139   }
140 
StubAddOperationWith(int (stub)(ANeuralNetworksModel * model,ANeuralNetworksOperationType type,uint32_t inputCount,const uint32_t * inputs,uint32_t outputCount,const uint32_t * outputs))141   void StubAddOperationWith(
142       int(stub)(ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
143                 uint32_t inputCount, const uint32_t* inputs,
144                 uint32_t outputCount, const uint32_t* outputs)) {
145     nnapi_->ANeuralNetworksModel_addOperation = stub;
146   }
147 
148   template <int Value>
IdentifyInputAndOutputsReturns()149   void IdentifyInputAndOutputsReturns() {
150     nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs =
151         [](ANeuralNetworksModel* model, uint32_t inputCount,
152            const uint32_t* inputs, uint32_t outputCount,
153            const uint32_t* outputs) { return Value; };
154   }
155 
156   template <int Value>
RelaxComputationFloatReturns()157   void RelaxComputationFloatReturns() {
158     nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 =
159         [](ANeuralNetworksModel* model, bool allow) { return Value; };
160   }
161 
162   template <int Value>
ModelFinishReturns()163   void ModelFinishReturns() {
164     nnapi_->ANeuralNetworksModel_finish = [](ANeuralNetworksModel* model) {
165       return Value;
166     };
167   }
168 
169   template <int Value>
MemoryCreateFromFdReturns()170   void MemoryCreateFromFdReturns() {
171     nnapi_->ANeuralNetworksMemory_createFromFd =
172         [](size_t size, int protect, int fd, size_t offset,
173            ANeuralNetworksMemory** memory) {
174           *memory = reinterpret_cast<ANeuralNetworksMemory*>(2);
175           return Value;
176         };
177   }
178 
179   template <int Value>
CompilationCreateReturns()180   void CompilationCreateReturns() {
181     nnapi_->ANeuralNetworksCompilation_create =
182         [](ANeuralNetworksModel* model,
183            ANeuralNetworksCompilation** compilation) {
184           *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
185           return Value;
186         };
187   }
188 
189   template <int Value>
CompilationCreateForDevicesReturns()190   void CompilationCreateForDevicesReturns() {
191     nnapi_->ANeuralNetworksCompilation_createForDevices =
192         [](ANeuralNetworksModel* model,
193            const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
194            ANeuralNetworksCompilation** compilation) {
195           *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
196           return Value;
197         };
198   }
199 
StubCompilationCreateForDevicesWith(int (stub)(ANeuralNetworksModel * model,const ANeuralNetworksDevice * const * devices,uint32_t numDevices,ANeuralNetworksCompilation ** compilation))200   void StubCompilationCreateForDevicesWith(int(stub)(
201       ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
202       uint32_t numDevices, ANeuralNetworksCompilation** compilation)) {
203     nnapi_->ANeuralNetworksCompilation_createForDevices = stub;
204   }
205 
206   template <int Value>
CompilationFinishReturns()207   void CompilationFinishReturns() {
208     nnapi_->ANeuralNetworksCompilation_finish =
209         [](ANeuralNetworksCompilation* compilation) { return Value; };
210   }
211 
212   template <int Value>
ExecutionCreateReturns()213   void ExecutionCreateReturns() {
214     nnapi_->ANeuralNetworksExecution_create =
215         [](ANeuralNetworksCompilation* compilation,
216            ANeuralNetworksExecution** execution) {
217           if (compilation == nullptr) return 1;
218           *execution = reinterpret_cast<ANeuralNetworksExecution*>(4);
219           return Value;
220         };
221   }
222   template <int Value>
ExecutionSetInputFromMemoryReturns()223   void ExecutionSetInputFromMemoryReturns() {
224     nnapi_->ANeuralNetworksExecution_setInputFromMemory =
225         [](ANeuralNetworksExecution* execution, int32_t index,
226            const ANeuralNetworksOperandType* type,
227            const ANeuralNetworksMemory* memory, size_t offset,
228            size_t length) { return Value; };
229   }
230   template <int Value>
ExecutionSetOutputFromMemoryReturns()231   void ExecutionSetOutputFromMemoryReturns() {
232     nnapi_->ANeuralNetworksExecution_setOutputFromMemory =
233         [](ANeuralNetworksExecution* execution, int32_t index,
234            const ANeuralNetworksOperandType* type,
235            const ANeuralNetworksMemory* memory, size_t offset,
236            size_t length) { return Value; };
237   }
238 
239   template <int Value>
ExecutionComputeReturns()240   void ExecutionComputeReturns() {
241     nnapi_->ANeuralNetworksExecution_compute =
242         [](ANeuralNetworksExecution* execution) { return Value; };
243   }
244 
245   template <int Value>
GetSupportedOperationsForDevicesReturns()246   void GetSupportedOperationsForDevicesReturns() {
247     nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
248         [](const ANeuralNetworksModel* model,
249            const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
250            bool* supportedOps) { return Value; };
251   }
252 
StubGetSupportedOperationsForDevicesWith(int (stub)(const ANeuralNetworksModel * model,const ANeuralNetworksDevice * const * devices,uint32_t numDevices,bool * supportedOps))253   void StubGetSupportedOperationsForDevicesWith(
254       int(stub)(const ANeuralNetworksModel* model,
255                 const ANeuralNetworksDevice* const* devices,
256                 uint32_t numDevices, bool* supportedOps)) {
257     nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = stub;
258   }
259 
260   template <int Value>
ExecutionStartComputeReturns()261   void ExecutionStartComputeReturns() {
262     nnapi_->ANeuralNetworksExecution_startCompute =
263         [](ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
264           *event = reinterpret_cast<ANeuralNetworksEvent*>(1);
265           return Value;
266         };
267   }
268 
269   template <int Value>
EventWaitReturns()270   void EventWaitReturns() {
271     nnapi_->ANeuralNetworksEvent_wait = [](ANeuralNetworksEvent* event) {
272       return Value;
273     };
274   }
275 
276   template <int Value>
SetPriorityReturns()277   void SetPriorityReturns() {
278     nnapi_->ANeuralNetworksCompilation_setPriority =
279         [](ANeuralNetworksCompilation* compilation, int priority) -> int {
280       return Value;
281     };
282   }
283 
284   template <int Value>
SetOperandSymmPerChannelQuantParamsReturns()285   void SetOperandSymmPerChannelQuantParamsReturns() {
286     nnapi_->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams =
287         [](ANeuralNetworksModel* model, int32_t index,
288            const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) {
289           return Value;
290         };
291   }
292 
293   /*
294    * Sets the SDK Version in the nnapi structure.
295    * If set_unsupported_ops_to_null is set to true, all the functions not
296    * available at the given sdk level will be set to null too.
297    */
298   void SetAndroidSdkVersion(int version,
299                             bool set_unsupported_ops_to_null = false);
300 
GetNnApi()301   const NnApi* GetNnApi() { return nnapi_; }
302 
303  protected:
NnApiHandler(NnApi * nnapi)304   explicit NnApiHandler(NnApi* nnapi) : nnapi_(nnapi) { DCHECK(nnapi); }
305 
306   NnApi* nnapi_;
307 
308   static const char kNnapiReferenceDeviceName[];
309   static const int kNnapiReferenceDevice;
310   static const int kNnapiDevice;
311 
312   static void SetDeviceName(const std::string& name);
313 
314  private:
315   static char* nnapi_device_name_;
316   static int nnapi_device_feature_level_;
317 };
318 
319 // Returns a pointer to an unaltered instance of NNAPI. Is intended
320 // to be used by stub methods when wanting to pass-through to original
321 // implementation for example:
322 //
323 // NnApiTestUtility()->StubGetDeviceWith(
324 //  [](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
325 //        static int count = 0;
326 //        if (count++ < 1) {
327 //          NnApiPassthroughInstance()->ANeuralNetworks_getDevice(
328 //                devIndex, device);
329 //        } else {
330 //            return ANEURALNETWORKS_BAD_DATA;
331 //        }
332 //   });
333 const NnApi* NnApiPassthroughInstance();
334 
335 // Returns an instance of NnApiProxy that can be used to alter
336 // the behaviour of the TFLite wide instance of NnApi.
337 NnApiHandler* NnApiProxyInstance();
338 
339 }  // namespace nnapi
340 }  // namespace tflite
341 
342 #endif  // TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
343