/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Provides C++ classes to more easily use the Neural Networks API.
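//
// A minimal end-to-end sketch (illustrative only; error checking omitted;
// assumes the default, non-NNTEST_SLTS build). It builds a model that adds
// two 2-element tensors, compiles it, and runs it:
//
//     using namespace android::nn::wrapper;
//
//     Model model;
//     OperandType tensorType(Type::TENSOR_FLOAT32, {2});
//     OperandType activationType(Type::INT32, {});
//     uint32_t a = model.addOperand(&tensorType);
//     uint32_t b = model.addOperand(&tensorType);
//     uint32_t act = model.addOperand(&activationType);
//     uint32_t c = model.addOperand(&tensorType);
//     int32_t noActivation = ANEURALNETWORKS_FUSED_NONE;
//     model.setOperandValue(act, &noActivation, sizeof(noActivation));
//     model.addOperation(ANEURALNETWORKS_ADD, {a, b, act}, {c});
//     model.identifyInputsAndOutputs({a, b}, {c});
//     model.finish();
//
//     Compilation compilation(&model);
//     compilation.finish();
//
//     Execution execution(&compilation);
//     float inA[2] = {1.0f, 2.0f}, inB[2] = {3.0f, 4.0f}, out[2] = {};
//     execution.setInput(0, inA, sizeof(inA));
//     execution.setInput(1, inB, sizeof(inB));
//     execution.setOutput(0, out, sizeof(out));
//     execution.compute();  // out is now {4.0f, 6.0f}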

#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_WRAPPER_H
#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_WRAPPER_H

#include <assert.h>
#include <math.h>

#include <algorithm>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#ifdef NNTEST_SLTS
#include "SupportLibrary.h"
#else
#include "NeuralNetworks.h"
#endif

namespace android {
namespace nn {
namespace wrapper {

enum class Type {
    FLOAT32 = ANEURALNETWORKS_FLOAT32,
    INT32 = ANEURALNETWORKS_INT32,
    UINT32 = ANEURALNETWORKS_UINT32,
    TENSOR_FLOAT32 = ANEURALNETWORKS_TENSOR_FLOAT32,
    TENSOR_INT32 = ANEURALNETWORKS_TENSOR_INT32,
    TENSOR_QUANT8_ASYMM = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
    BOOL = ANEURALNETWORKS_BOOL,
    TENSOR_QUANT16_SYMM = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
    TENSOR_FLOAT16 = ANEURALNETWORKS_TENSOR_FLOAT16,
    TENSOR_BOOL8 = ANEURALNETWORKS_TENSOR_BOOL8,
    FLOAT16 = ANEURALNETWORKS_FLOAT16,
    TENSOR_QUANT8_SYMM_PER_CHANNEL = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
    TENSOR_QUANT16_ASYMM = ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
    TENSOR_QUANT8_SYMM = ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
    TENSOR_QUANT8_ASYMM_SIGNED = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
    MODEL = ANEURALNETWORKS_MODEL,
};

enum class ExecutePreference {
    PREFER_LOW_POWER = ANEURALNETWORKS_PREFER_LOW_POWER,
    PREFER_FAST_SINGLE_ANSWER = ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER,
    PREFER_SUSTAINED_SPEED = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED
};

enum class Duration {
    ON_HARDWARE = ANEURALNETWORKS_DURATION_ON_HARDWARE,
    IN_DRIVER = ANEURALNETWORKS_DURATION_IN_DRIVER,
    FENCED_ON_HARDWARE = ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE,
    FENCED_IN_DRIVER = ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER,
};

enum class ExecutePriority {
    LOW = ANEURALNETWORKS_PRIORITY_LOW,
    MEDIUM = ANEURALNETWORKS_PRIORITY_MEDIUM,
    HIGH = ANEURALNETWORKS_PRIORITY_HIGH,
    DEFAULT = ANEURALNETWORKS_PRIORITY_DEFAULT,
};

enum class Result {
    NO_ERROR = ANEURALNETWORKS_NO_ERROR,
    OUT_OF_MEMORY = ANEURALNETWORKS_OUT_OF_MEMORY,
    INCOMPLETE = ANEURALNETWORKS_INCOMPLETE,
    UNEXPECTED_NULL = ANEURALNETWORKS_UNEXPECTED_NULL,
    BAD_DATA = ANEURALNETWORKS_BAD_DATA,
    OP_FAILED = ANEURALNETWORKS_OP_FAILED,
    UNMAPPABLE = ANEURALNETWORKS_UNMAPPABLE,
    BAD_STATE = ANEURALNETWORKS_BAD_STATE,
    OUTPUT_INSUFFICIENT_SIZE = ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE,
    UNAVAILABLE_DEVICE = ANEURALNETWORKS_UNAVAILABLE_DEVICE,
    MISSED_DEADLINE_TRANSIENT = ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT,
    MISSED_DEADLINE_PERSISTENT = ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT,

    // Functionality needed for this feature is not available on the current device.
    FEATURE_LEVEL_TOO_LOW = 100001,
};

struct SymmPerChannelQuantParams {
    ANeuralNetworksSymmPerChannelQuantParams params;
    std::vector<float> scales;

    SymmPerChannelQuantParams(std::vector<float> scalesVec, uint32_t channelDim)
        : scales(std::move(scalesVec)) {
        params = {
                .channelDim = channelDim,
                .scaleCount = static_cast<uint32_t>(scales.size()),
                .scales = scales.size() > 0 ? scales.data() : nullptr,
        };
    }

    SymmPerChannelQuantParams(const SymmPerChannelQuantParams& other)
        : params(other.params), scales(other.scales) {
        params.scales = scales.size() > 0 ? scales.data() : nullptr;
    }

    SymmPerChannelQuantParams& operator=(const SymmPerChannelQuantParams& other) {
        if (this != &other) {
            params = other.params;
            scales = other.scales;
            params.scales = scales.size() > 0 ? scales.data() : nullptr;
        }
        return *this;
    }
};
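
// Example: per-channel symmetric quantization with one scale per output channel
// along dimension 0 (an illustrative sketch; the scale values are made up). The
// result is passed to the OperandType constructor defined below:
//
//     SymmPerChannelQuantParams channelQuant({0.1f, 0.2f, 0.15f, 0.05f},
//                                            /*channelDim=*/0);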

struct OperandType {
    ANeuralNetworksOperandType operandType;
    std::vector<uint32_t> dimensions;
    std::optional<SymmPerChannelQuantParams> channelQuant;

    OperandType(const OperandType& other)
        : operandType(other.operandType),
          dimensions(other.dimensions),
          channelQuant(other.channelQuant) {
        operandType.dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr;
    }

    OperandType& operator=(const OperandType& other) {
        if (this != &other) {
            operandType = other.operandType;
            dimensions = other.dimensions;
            channelQuant = other.channelQuant;
            operandType.dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr;
        }
        return *this;
    }

    OperandType(Type type, std::vector<uint32_t> d, float scale = 0.0f, int32_t zeroPoint = 0)
        : dimensions(std::move(d)), channelQuant(std::nullopt) {
        operandType = {
                .type = static_cast<int32_t>(type),
                .dimensionCount = static_cast<uint32_t>(dimensions.size()),
                .dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr,
                .scale = scale,
                .zeroPoint = zeroPoint,
        };
    }

    OperandType(Type type, std::vector<uint32_t> data, SymmPerChannelQuantParams&& channelQuant)
        : dimensions(std::move(data)), channelQuant(std::move(channelQuant)) {
        assert(type == Type::TENSOR_QUANT8_SYMM_PER_CHANNEL);

        operandType = {
                .type = static_cast<int32_t>(type),
                .dimensionCount = static_cast<uint32_t>(dimensions.size()),
                .dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr,
                .scale = 0.0f,
                .zeroPoint = 0,
        };
    }

    void updateDimensions(std::vector<uint32_t> ndim) {
        dimensions = ndim;
        operandType.dimensions = dimensions.size() > 0 ? dimensions.data() : nullptr;
    }
};
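
// Example operand types (an illustrative sketch; shapes and quantization
// parameters are made up):
//
//     // A float tensor and a scalar.
//     OperandType floatTensor(Type::TENSOR_FLOAT32, {1, 224, 224, 3});
//     OperandType intScalar(Type::INT32, {});
//
//     // An asymmetric quantized tensor: real = (quantized - zeroPoint) * scale.
//     OperandType quantTensor(Type::TENSOR_QUANT8_ASYMM, {1, 224, 224, 3},
//                             /*scale=*/1.0f / 255.0f, /*zeroPoint=*/0);
//
//     // A per-channel symmetric quantized filter, using the params above.
//     OperandType filterType(Type::TENSOR_QUANT8_SYMM_PER_CHANNEL, {4, 3, 3, 3},
//                            std::move(channelQuant));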

#ifdef NNTEST_SLTS
#define NNAPI_CALL(apiCall) mNnApi->getFL5()->apiCall
#else
#define NNAPI_CALL(apiCall) apiCall
#endif

class Memory {
   public:
#ifdef NNTEST_SLTS
    // Takes ownership of an ANeuralNetworksMemory
    Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory)
        : mNnApi(nnapi), mMemory(memory) {}

    Memory(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd, size_t offset)
        : mNnApi(nnapi) {
#else
    Memory(size_t size, int protect, int fd, size_t offset) {
#endif
        mValid = NNAPI_CALL(ANeuralNetworksMemory_createFromFd(
                         size, protect, fd, offset, &mMemory)) == ANEURALNETWORKS_NO_ERROR;
    }

#ifdef __ANDROID__
#ifdef NNTEST_SLTS
    Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer) : mNnApi(nnapi) {
#else   // NNTEST_SLTS
    Memory(AHardwareBuffer* buffer) {
#endif  // NNTEST_SLTS
        mValid = NNAPI_CALL(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &mMemory)) ==
                 ANEURALNETWORKS_NO_ERROR;
    }
#endif  // __ANDROID__

    ~Memory() {
        if (mMemory) {
            NNAPI_CALL(ANeuralNetworksMemory_free(mMemory));
        }
    }

    // Disallow copy semantics to ensure the runtime object can only be freed
    // once. Copy semantics could be enabled if some sort of reference counting
    // or deep-copy system for runtime objects is added later.
    Memory(const Memory&) = delete;
    Memory& operator=(const Memory&) = delete;

    // Move semantics to remove access to the runtime object from the wrapper
    // object that is being moved. This ensures the runtime object will be
    // freed only once.
    Memory(Memory&& other) { *this = std::move(other); }
    Memory& operator=(Memory&& other) {
        if (this != &other) {
            if (mMemory) {
                NNAPI_CALL(ANeuralNetworksMemory_free(mMemory));
            }
#ifdef NNTEST_SLTS
            mNnApi = other.mNnApi;
#endif
            mMemory = other.mMemory;
            mValid = other.mValid;
            other.mMemory = nullptr;
            other.mValid = false;
        }
        return *this;
    }

    ANeuralNetworksMemory* get() const { return mMemory; }
    bool isValid() const { return mValid; }

   private:
#ifdef NNTEST_SLTS
    const NnApiSupportLibrary* mNnApi = nullptr;
#endif
    ANeuralNetworksMemory* mMemory = nullptr;
    bool mValid = true;
};
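
// Example: backing a constant operand with shared memory (an illustrative,
// non-SLTS sketch; ASharedMemory_create from <android/sharedmem.h> is one way
// to obtain an mmap-able fd, and weightsIndex/weightsSize are placeholders):
//
//     int fd = ASharedMemory_create("weights", weightsSize);
//     // ... write the weight data through a mapping of fd ...
//     Memory memory(weightsSize, PROT_READ, fd, /*offset=*/0);
//     if (memory.isValid()) {
//         model.setOperandValueFromMemory(weightsIndex, &memory, /*offset=*/0,
//                                         weightsSize);
//     }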

class Model {
   public:
#ifdef NNTEST_SLTS
    Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {
#else
    Model() {
#endif
        // TODO handle the value returned by this call
        NNAPI_CALL(ANeuralNetworksModel_create(&mModel));
    }
    ~Model() {
        if (mModel) {
            NNAPI_CALL(ANeuralNetworksModel_free(mModel));
        }
    }

    // Disallow copy semantics to ensure the runtime object can only be freed
    // once. Copy semantics could be enabled if some sort of reference counting
    // or deep-copy system for runtime objects is added later.
    Model(const Model&) = delete;
    Model& operator=(const Model&) = delete;

    // Move semantics to remove access to the runtime object from the wrapper
    // object that is being moved. This ensures the runtime object will be
    // freed only once.
    Model(Model&& other) { *this = std::move(other); }
    Model& operator=(Model&& other) {
        if (this != &other) {
            if (mModel) {
                NNAPI_CALL(ANeuralNetworksModel_free(mModel));
            }
#ifdef NNTEST_SLTS
            mNnApi = other.mNnApi;
#endif
            mModel = other.mModel;
            mNextOperandId = other.mNextOperandId;
            mValid = other.mValid;
            other.mModel = nullptr;
            other.mNextOperandId = 0;
            other.mValid = false;
        }
        return *this;
    }

    Result finish() {
        if (mValid) {
            auto result = static_cast<Result>(NNAPI_CALL(ANeuralNetworksModel_finish(mModel)));
            if (result != Result::NO_ERROR) {
                mValid = false;
            }
            return result;
        } else {
            return Result::BAD_STATE;
        }
    }

    uint32_t addOperand(const OperandType* type) {
        if (NNAPI_CALL(ANeuralNetworksModel_addOperand(mModel, &(type->operandType))) !=
            ANEURALNETWORKS_NO_ERROR) {
            mValid = false;
        }
        if (type->channelQuant) {
            if (NNAPI_CALL(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
                        mModel, mNextOperandId, &type->channelQuant.value().params)) !=
                ANEURALNETWORKS_NO_ERROR) {
                mValid = false;
            }
        }
        return mNextOperandId++;
    }

    void setOperandValue(uint32_t index, const void* buffer, size_t length) {
        if (NNAPI_CALL(ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length)) !=
            ANEURALNETWORKS_NO_ERROR) {
            mValid = false;
        }
    }

    void setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                                   size_t length) {
        if (NNAPI_CALL(ANeuralNetworksModel_setOperandValueFromMemory(
                    mModel, index, memory->get(), offset, length)) != ANEURALNETWORKS_NO_ERROR) {
            mValid = false;
        }
    }

    void setOperandValueFromModel(uint32_t index, const Model* model) {
        if (__builtin_available(android /* Android R / FL4 */ 30, *)) {
            if (NNAPI_CALL(ANeuralNetworksModel_setOperandValueFromModel(
                        mModel, index, model->getHandle())) != ANEURALNETWORKS_NO_ERROR) {
                mValid = false;
            }
        } else {
            mValid = false;
        }
    }

    void addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs,
                      const std::vector<uint32_t>& outputs) {
        if (NNAPI_CALL(ANeuralNetworksModel_addOperation(
                    mModel, type, static_cast<uint32_t>(inputs.size()), inputs.data(),
                    static_cast<uint32_t>(outputs.size()), outputs.data())) !=
            ANEURALNETWORKS_NO_ERROR) {
            mValid = false;
        }
    }

    void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
                                  const std::vector<uint32_t>& outputs) {
        if (NNAPI_CALL(ANeuralNetworksModel_identifyInputsAndOutputs(
                    mModel, static_cast<uint32_t>(inputs.size()), inputs.data(),
                    static_cast<uint32_t>(outputs.size()), outputs.data())) !=
            ANEURALNETWORKS_NO_ERROR) {
            mValid = false;
        }
    }

    void relaxComputationFloat32toFloat16(bool isRelax) {
        if (NNAPI_CALL(ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, isRelax)) ==
            ANEURALNETWORKS_NO_ERROR) {
            mRelaxed = isRelax;
        }
    }

    ANeuralNetworksModel* getHandle() const { return mModel; }
    bool isValid() const { return mValid; }
    bool isRelaxed() const { return mRelaxed; }

#ifdef NNTEST_SLTS
   private:
    const NnApiSupportLibrary* mNnApi = nullptr;
#endif

   protected:
    ANeuralNetworksModel* mModel = nullptr;
    // We keep track of the operand ID as a convenience to the caller.
    uint32_t mNextOperandId = 0;
    bool mValid = true;
    bool mRelaxed = false;
};
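
// Example: referencing one model from another via a MODEL operand (an
// illustrative sketch; the referenced model must already be finished, and
// availability is gated on Android R / feature level 4 as checked above):
//
//     Model inner;
//     // ... build inner and call inner.finish() ...
//     Model outer;
//     OperandType modelType(Type::MODEL, {});
//     uint32_t ref = outer.addOperand(&modelType);
//     outer.setOperandValueFromModel(ref, &inner);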

class Event {
   public:
#ifdef NNTEST_SLTS
    Event(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {}
    Event(const NnApiSupportLibrary* nnapi, int syncFd) : mNnApi(nnapi) {
#else
    Event() {}
    Event(int syncFd) {
#endif
        mValid = NNAPI_CALL(ANeuralNetworksEvent_createFromSyncFenceFd(syncFd, &mEvent)) ==
                 ANEURALNETWORKS_NO_ERROR;
    }

    ~Event() {
        if (mEvent) {
            NNAPI_CALL(ANeuralNetworksEvent_free(mEvent));
        }
    }

    // Disallow copy semantics to ensure the runtime object can only be freed
    // once. Copy semantics could be enabled if some sort of reference counting
    // or deep-copy system for runtime objects is added later.
    Event(const Event&) = delete;
    Event& operator=(const Event&) = delete;

    // Move semantics to remove access to the runtime object from the wrapper
    // object that is being moved. This ensures the runtime object will be
    // freed only once.
    Event(Event&& other) { *this = std::move(other); }
    Event& operator=(Event&& other) {
        if (this != &other) {
            if (mEvent) {
                NNAPI_CALL(ANeuralNetworksEvent_free(mEvent));
            }
#ifdef NNTEST_SLTS
            mNnApi = other.mNnApi;
#endif
            mEvent = other.mEvent;
            other.mEvent = nullptr;
        }
        return *this;
    }

    Result wait() { return static_cast<Result>(NNAPI_CALL(ANeuralNetworksEvent_wait(mEvent))); }

    // Only for use by Execution
    void set(ANeuralNetworksEvent* newEvent) {
        if (mEvent) {
            NNAPI_CALL(ANeuralNetworksEvent_free(mEvent));
        }
        mEvent = newEvent;
    }

    // Only for use by Execution
    ANeuralNetworksEvent* getHandle() const { return mEvent; }

    Result getSyncFenceFd(int* sync_fence_fd) {
        return static_cast<Result>(
                NNAPI_CALL(ANeuralNetworksEvent_getSyncFenceFd(mEvent, sync_fence_fd)));
    }

    bool isValid() const { return mValid; }

#ifdef NNTEST_SLTS
   private:
    const NnApiSupportLibrary* mNnApi = nullptr;
#endif

   private:
    bool mValid = true;
    ANeuralNetworksEvent* mEvent = nullptr;
};
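
// Example: waiting on an asynchronous execution via an Event (an illustrative,
// non-SLTS sketch; see Execution::startCompute below):
//
//     Event event;
//     execution.startCompute(&event);
//     // ... do other work ...
//     event.wait();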

class Compilation {
   public:
#ifdef NNTEST_SLTS
    // On success, createForDevice(s) will return Result::NO_ERROR and the created compilation;
    // otherwise, it will return the error code and a Compilation object wrapping a nullptr
    // handle.
    static std::pair<Result, Compilation> createForDevice(const NnApiSupportLibrary* nnapi,
                                                          const Model* model,
                                                          const ANeuralNetworksDevice* device) {
        return createForDevices(nnapi, model, {device});
    }
    static std::pair<Result, Compilation> createForDevices(
            const NnApiSupportLibrary* nnapi, const Model* model,
            const std::vector<const ANeuralNetworksDevice*>& devices) {
        ANeuralNetworksCompilation* compilation = nullptr;
        const Result result =
                static_cast<Result>(nnapi->getFL5()->ANeuralNetworksCompilation_createForDevices(
                        model->getHandle(), devices.empty() ? nullptr : devices.data(),
                        devices.size(), &compilation));
        return {result, Compilation(nnapi, compilation)};
    }
#else
    Compilation(const Model* model) {
        int result =
                NNAPI_CALL(ANeuralNetworksCompilation_create(model->getHandle(), &mCompilation));
        if (result != 0) {
            // TODO Handle the error
        }
    }
#endif

    ~Compilation() {
        if (mCompilation) {
            NNAPI_CALL(ANeuralNetworksCompilation_free(mCompilation));
        }
    }

    // Disallow copy semantics to ensure the runtime object can only be freed
    // once. Copy semantics could be enabled if some sort of reference counting
    // or deep-copy system for runtime objects is added later.
    Compilation(const Compilation&) = delete;
    Compilation& operator=(const Compilation&) = delete;

    // Move semantics to remove access to the runtime object from the wrapper
    // object that is being moved. This ensures the runtime object will be
    // freed only once.
    Compilation(Compilation&& other) { *this = std::move(other); }
    Compilation& operator=(Compilation&& other) {
        if (this != &other) {
            if (mCompilation) {
                NNAPI_CALL(ANeuralNetworksCompilation_free(mCompilation));
            }
#ifdef NNTEST_SLTS
            mNnApi = other.mNnApi;
#endif
            mCompilation = other.mCompilation;
            other.mCompilation = nullptr;
        }
        return *this;
    }

    Result setPreference(ExecutePreference preference) {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setPreference(
                mCompilation, static_cast<int32_t>(preference))));
    }

    Result setPriority(ExecutePriority priority) {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setPriority(
                mCompilation, static_cast<int32_t>(priority))));
    }

    Result setCaching(const std::string& cacheDir, const std::vector<uint8_t>& token) {
        if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) {
            return Result::BAD_DATA;
        }
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setCaching(
                mCompilation, cacheDir.c_str(), token.data())));
    }

    Result finish() {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_finish(mCompilation)));
    }

    Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(
                    NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
                            mCompilation, index, alignment)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

    Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(
                    NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
                            mCompilation, index, padding)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

    Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(
                    NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
                            mCompilation, index, alignment)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

    Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(
                    NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
                            mCompilation, index, padding)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

    ANeuralNetworksCompilation* getHandle() const { return mCompilation; }

#ifdef NNTEST_SLTS
   protected:
    // Takes ownership of the ANeuralNetworksCompilation.
    Compilation(const NnApiSupportLibrary* nnapi, ANeuralNetworksCompilation* compilation)
        : mNnApi(nnapi), mCompilation(compilation) {}

   private:
    const NnApiSupportLibrary* mNnApi = nullptr;
#else
   private:
#endif
    ANeuralNetworksCompilation* mCompilation = nullptr;
};
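
// Example: compiling with an execution preference and persistent caching (an
// illustrative, non-SLTS sketch; the cache directory and token are placeholders,
// and the token must be exactly ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN bytes):
//
//     Compilation compilation(&model);
//     compilation.setPreference(ExecutePreference::PREFER_SUSTAINED_SPEED);
//     std::vector<uint8_t> token(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
//     compilation.setCaching("/data/local/tmp/nnapi_cache", token);
//     compilation.finish();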

class Execution {
   public:
#ifdef NNTEST_SLTS
    Execution(const NnApiSupportLibrary* nnapi, const Compilation* compilation) : mNnApi(nnapi) {
#else
    Execution(const Compilation* compilation) {
#endif
        int result =
                NNAPI_CALL(ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution));
        if (result != 0) {
            // TODO Handle the error
        }
    }

    ~Execution() {
        if (mExecution) {
            NNAPI_CALL(ANeuralNetworksExecution_free(mExecution));
        }
    }

    // Disallow copy semantics to ensure the runtime object can only be freed
    // once. Copy semantics could be enabled if some sort of reference counting
    // or deep-copy system for runtime objects is added later.
    Execution(const Execution&) = delete;
    Execution& operator=(const Execution&) = delete;

    // Move semantics to remove access to the runtime object from the wrapper
    // object that is being moved. This ensures the runtime object will be
    // freed only once.
    Execution(Execution&& other) { *this = std::move(other); }
    Execution& operator=(Execution&& other) {
        if (this != &other) {
            if (mExecution) {
                NNAPI_CALL(ANeuralNetworksExecution_free(mExecution));
            }
#ifdef NNTEST_SLTS
            mNnApi = other.mNnApi;
#endif
            mExecution = other.mExecution;
            other.mExecution = nullptr;
        }
        return *this;
    }

    Result setInput(uint32_t index, const void* buffer, size_t length,
                    const ANeuralNetworksOperandType* type = nullptr) {
        return static_cast<Result>(NNAPI_CALL(
                ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length)));
    }

    Result setInputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                              uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_setInputFromMemory(
                mExecution, index, type, memory->get(), offset, length)));
    }

    Result setOutput(uint32_t index, void* buffer, size_t length,
                     const ANeuralNetworksOperandType* type = nullptr) {
        return static_cast<Result>(NNAPI_CALL(
                ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length)));
    }

    Result setOutputFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                               uint32_t length, const ANeuralNetworksOperandType* type = nullptr) {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_setOutputFromMemory(
                mExecution, index, type, memory->get(), offset, length)));
    }

    Result enableInputAndOutputPadding(bool enable) {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(NNAPI_CALL(
                    ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, enable)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

    Result setReusable(bool reusable) {
        if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
            return static_cast<Result>(
                    NNAPI_CALL(ANeuralNetworksExecution_setReusable(mExecution, reusable)));
        } else {
            return Result::FEATURE_LEVEL_TOO_LOW;
        }
    }

#ifndef NNTEST_SLTS
    Result startCompute(Event* event) {
        ANeuralNetworksEvent* ev = nullptr;
        Result result = static_cast<Result>(
                NNAPI_CALL(ANeuralNetworksExecution_startCompute(mExecution, &ev)));
        event->set(ev);
        return result;
    }

    Result startComputeWithDependencies(const std::vector<const Event*>& dependencies,
                                        uint64_t duration, Event* event) {
        std::vector<const ANeuralNetworksEvent*> deps(dependencies.size());
        std::transform(dependencies.begin(), dependencies.end(), deps.begin(),
                       [](const Event* e) { return e->getHandle(); });
        ANeuralNetworksEvent* ev = nullptr;
        Result result = static_cast<Result>(
                NNAPI_CALL(ANeuralNetworksExecution_startComputeWithDependencies(
                        mExecution, deps.data(), deps.size(), duration, &ev)));
        event->set(ev);
        return result;
    }
#endif

    Result compute() {
        return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_compute(mExecution)));
    }

    Result getOutputOperandDimensions(uint32_t index, std::vector<uint32_t>* dimensions) {
        uint32_t rank = 0;
        Result result = static_cast<Result>(NNAPI_CALL(
                ANeuralNetworksExecution_getOutputOperandRank(mExecution, index, &rank)));
        dimensions->resize(rank);
        if ((result != Result::NO_ERROR && result != Result::OUTPUT_INSUFFICIENT_SIZE) ||
            rank == 0) {
            return result;
        }
        result = static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_getOutputOperandDimensions(
                mExecution, index, dimensions->data())));
        return result;
    }

   private:
#ifdef NNTEST_SLTS
    const NnApiSupportLibrary* mNnApi = nullptr;
#endif
    ANeuralNetworksExecution* mExecution = nullptr;
};
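
// Example: a reusable execution with dynamic output shapes (an illustrative,
// non-SLTS sketch; setReusable requires feature level 5, and in/out are
// placeholder buffers sized for the model above):
//
//     Execution execution(&compilation);
//     execution.setReusable(true);  // allow compute() to be called repeatedly
//     execution.setInput(0, in, sizeof(in));
//     execution.setOutput(0, out, sizeof(out));
//     if (execution.compute() == Result::NO_ERROR) {
//         std::vector<uint32_t> dims;
//         execution.getOutputOperandDimensions(0, &dims);
//     }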

}  // namespace wrapper
}  // namespace nn
}  // namespace android

#endif  //  ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_WRAPPER_H