• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2022 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_NNAPI_IMPLEMENTATION_H_
18 #define MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_NNAPI_IMPLEMENTATION_H_
19 
20 #include <cstdbool>
21 #include <cstddef>
22 #include <cstdint>
23 #include <sys/cdefs.h>
24 #include "src/litert/delegate/nnapi/NeuralNetworksTypes.h"
25 
26 // This is required for building libneuralnetworks_cl,
27 // the symbols have same names as in NDK, but
28 // they are not bounded by API availability.
29 namespace mindspore {
30 namespace lite {
31 // nn api function types
32 typedef int (*ANeuralNetworksMemoryDesc_create_fn)(ANeuralNetworksMemoryDesc **desc);
33 
34 typedef void (*ANeuralNetworksMemoryDesc_free_fn)(ANeuralNetworksMemoryDesc *desc);
35 
36 typedef int (*ANeuralNetworksMemoryDesc_addInputRole_fn)(ANeuralNetworksMemoryDesc *desc,
37                                                          const ANeuralNetworksCompilation *compilation, uint32_t index,
38                                                          float frequency);
39 
40 typedef int (*ANeuralNetworksMemoryDesc_addOutputRole_fn)(ANeuralNetworksMemoryDesc *desc,
41                                                           const ANeuralNetworksCompilation *compilation, uint32_t index,
42                                                           float frequency);
43 
44 typedef int (*ANeuralNetworksMemoryDesc_setDimensions_fn)(ANeuralNetworksMemoryDesc *desc, uint32_t rank,
45                                                           const uint32_t *dimensions);
46 
47 typedef int (*ANeuralNetworksMemoryDesc_finish_fn)(ANeuralNetworksMemoryDesc *desc);
48 
49 typedef int (*ANeuralNetworksMemory_createFromDesc_fn)(const ANeuralNetworksMemoryDesc *desc,
50                                                        ANeuralNetworksMemory **memory);
51 
52 typedef int (*ANeuralNetworksMemory_copy_fn)(const ANeuralNetworksMemory *src, const ANeuralNetworksMemory *dst);
53 
54 typedef int (*ANeuralNetworks_getDeviceCount_fn)(uint32_t *numDevices);
55 
56 typedef int (*ANeuralNetworks_getDevice_fn)(uint32_t devIndex, ANeuralNetworksDevice **device);
57 
58 typedef int (*ANeuralNetworksDevice_getName_fn)(const ANeuralNetworksDevice *device, const char **name);
59 
60 typedef int (*ANeuralNetworksDevice_getType_fn)(const ANeuralNetworksDevice *device, int32_t *type);
61 
62 typedef int (*ANeuralNetworksDevice_getVersion_fn)(const ANeuralNetworksDevice *device, const char **version);
63 
64 typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(const ANeuralNetworksDevice *device, int64_t *featureLevel);
65 
66 typedef int (*ANeuralNetworksDevice_wait_fn)(const ANeuralNetworksDevice *device);
67 
68 typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)(const ANeuralNetworksModel *model,
69                                                                         const ANeuralNetworksDevice *const *devices,
70                                                                         uint32_t numDevices, bool *supportedOps);
71 
72 typedef int (*ANeuralNetworksCompilation_createForDevices_fn)(ANeuralNetworksModel *model,
73                                                               const ANeuralNetworksDevice *const *devices,
74                                                               uint32_t numDevices,
75                                                               ANeuralNetworksCompilation **compilation);
76 
77 typedef int (*ANeuralNetworksCompilation_setCaching_fn)(ANeuralNetworksCompilation *compilation, const char *cacheDir,
78                                                         const uint8_t *token);
79 
80 typedef int (*ANeuralNetworksExecution_compute_fn)(ANeuralNetworksExecution *execution);
81 
82 typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(ANeuralNetworksExecution *execution, int32_t index,
83                                                                 uint32_t *rank);
84 
85 typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)(ANeuralNetworksExecution *execution,
86                                                                       int32_t index, uint32_t *dimensions);
87 
88 typedef int (*ANeuralNetworksBurst_create_fn)(ANeuralNetworksCompilation *compilation, ANeuralNetworksBurst **burst);
89 
90 typedef void (*ANeuralNetworksBurst_free_fn)(ANeuralNetworksBurst *burst);
91 
92 typedef int (*ANeuralNetworksExecution_burstCompute_fn)(ANeuralNetworksExecution *execution,
93                                                         ANeuralNetworksBurst *burst);
94 
95 typedef int (*ANeuralNetworksMemory_createFromAHardwareBuffer_fn)(const AHardwareBuffer *ahwb,
96                                                                   ANeuralNetworksMemory **memory);
97 
98 typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(ANeuralNetworksExecution *execution, bool measure);
99 
100 typedef int (*ANeuralNetworksExecution_getDuration_fn)(const ANeuralNetworksExecution *execution, int32_t durationCode,
101                                                        uint64_t *duration);
102 
103 typedef int (*ANeuralNetworksMemory_createFromFd_fn)(size_t size, int protect, int fd, size_t offset,
104                                                      ANeuralNetworksMemory **memory);
105 
106 typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory *memory);
107 
108 typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel **model);
109 
110 typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel *model);
111 
112 typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel *model);
113 
114 typedef int (*ANeuralNetworksModel_addOperand_fn)(ANeuralNetworksModel *model, const ANeuralNetworksOperandType *type);
115 
116 typedef int (*ANeuralNetworksModel_setOperandValue_fn)(ANeuralNetworksModel *model, int32_t index, const void *buffer,
117                                                        size_t length);
118 
119 typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)(
120   ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksSymmPerChannelQuantParams *channelQuant);
121 
122 typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(ANeuralNetworksModel *model, int32_t index,
123                                                                  const ANeuralNetworksMemory *memory, size_t offset,
124                                                                  size_t length);
125 
126 typedef int (*ANeuralNetworksModel_setOperandValueFromModel_fn)(ANeuralNetworksModel *model, int32_t index,
127                                                                 const ANeuralNetworksModel *value);
128 
129 typedef int (*ANeuralNetworksModel_addOperation_fn)(ANeuralNetworksModel *model, ANeuralNetworksOperationType type,
130                                                     uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
131                                                     const uint32_t *outputs);
132 
133 typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(ANeuralNetworksModel *model, uint32_t inputCount,
134                                                                 const uint32_t *inputs, uint32_t outputCount,
135                                                                 const uint32_t *outputs);
136 
137 typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(ANeuralNetworksModel *model, bool allow);
138 
139 typedef int (*ANeuralNetworksCompilation_create_fn)(ANeuralNetworksModel *model,
140                                                     ANeuralNetworksCompilation **compilation);
141 
142 typedef void (*ANeuralNetworksCompilation_free_fn)(ANeuralNetworksCompilation *compilation);
143 
144 typedef int (*ANeuralNetworksCompilation_setPreference_fn)(ANeuralNetworksCompilation *compilation, int32_t preference);
145 
146 typedef int (*ANeuralNetworksCompilation_finish_fn)(ANeuralNetworksCompilation *compilation);
147 
148 typedef int (*ANeuralNetworksCompilation_setPriority_fn)(ANeuralNetworksCompilation *compilation, int priority);
149 
150 typedef int (*ANeuralNetworksCompilation_setTimeout_fn)(ANeuralNetworksCompilation *compilation, uint64_t duration);
151 
152 typedef int (*ANeuralNetworksExecution_create_fn)(ANeuralNetworksCompilation *compilation,
153                                                   ANeuralNetworksExecution **execution);
154 
155 typedef void (*ANeuralNetworksExecution_free_fn)(ANeuralNetworksExecution *execution);
156 
157 typedef int (*ANeuralNetworksExecution_setInput_fn)(ANeuralNetworksExecution *execution, int32_t index,
158                                                     const ANeuralNetworksOperandType *type, const void *buffer,
159                                                     size_t length);
160 
161 typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(ANeuralNetworksExecution *execution, int32_t index,
162                                                               const ANeuralNetworksOperandType *type,
163                                                               const ANeuralNetworksMemory *memory, size_t offset,
164                                                               size_t length);
165 
166 typedef int (*ANeuralNetworksExecution_setOutput_fn)(ANeuralNetworksExecution *execution, int32_t index,
167                                                      const ANeuralNetworksOperandType *type, void *buffer,
168                                                      size_t length);
169 
170 typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(ANeuralNetworksExecution *execution, int32_t index,
171                                                                const ANeuralNetworksOperandType *type,
172                                                                const ANeuralNetworksMemory *memory, size_t offset,
173                                                                size_t length);
174 
175 typedef int (*ANeuralNetworksExecution_startCompute_fn)(ANeuralNetworksExecution *execution,
176                                                         ANeuralNetworksEvent **event);
177 
178 typedef int (*ANeuralNetworksExecution_setTimeout_fn)(ANeuralNetworksExecution *execution, uint64_t duration);
179 
180 typedef int (*ANeuralNetworksExecution_setLoopTimeout_fn)(ANeuralNetworksExecution *execution, uint64_t duration);
181 
182 typedef uint64_t (*ANeuralNetworks_getDefaultLoopTimeout_fn)();
183 
184 typedef uint64_t (*ANeuralNetworks_getMaximumLoopTimeout_fn)();
185 
186 typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent *event);
187 
188 typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent *event);
189 
190 typedef int (*ANeuralNetworksEvent_createFromSyncFenceFd_fn)(int sync_fence_fd, ANeuralNetworksEvent **event);
191 
192 typedef int (*ANeuralNetworksEvent_getSyncFenceFd_fn)(const ANeuralNetworksEvent *event, int *sync_fence_fd);
193 
194 typedef int (*ANeuralNetworksExecution_startComputeWithDependencies_fn)(ANeuralNetworksExecution *execution,
195                                                                         const ANeuralNetworksEvent *const *dependencies,
196                                                                         uint32_t num_dependencies, uint64_t duration,
197                                                                         ANeuralNetworksEvent **event);
198 
199 typedef int64_t (*ANeuralNetworks_getRuntimeFeatureLevel_fn)();
200 
201 typedef int (*ANeuralNetworksExecution_enableInputAndOutputPadding_fn)(ANeuralNetworksExecution *execution,
202                                                                        bool enable);
203 
204 typedef int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput_fn)(
205   const ANeuralNetworksCompilation *compilation, uint32_t index, uint32_t *alignment);
206 
207 typedef int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput_fn)(
208   const ANeuralNetworksCompilation *compilation, uint32_t index, uint32_t *padding);
209 
210 typedef int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput_fn)(
211   const ANeuralNetworksCompilation *compilation, uint32_t index, uint32_t *alignment);
212 
213 typedef int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput_fn)(
214   const ANeuralNetworksCompilation *compilation, uint32_t index, uint32_t *padding);
215 
216 typedef int (*ANeuralNetworksExecution_setReusable_fn)(ANeuralNetworksExecution *execution, bool reusable);
217 
218 struct NNAPI {
219   bool nnapi_exists;
220   int32_t android_sdk_version;
221 
222   /**
223    * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
224    *
225    * This only creates the memory descriptor. Its properties should be set with calls to
226    * {@link ANeuralNetworksMemoryDesc_addInputRole},
227    * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
228    * {@link ANeuralNetworksMemoryDesc_setDimensions}.
229    *
230    * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set.
231    *
232    * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor
233    * is no longer needed.
234    *
235    * Available since NNAPI feature level 4.
236    *
237    * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
238    *             Set to NULL if unsuccessful.
239    *
240    * @return ANEURALNETWORKS_NO_ERROR if successful.
241    */
242   int (*ANeuralNetworksMemoryDesc_create)(ANeuralNetworksMemoryDesc **desc);
243 
244   /**
245    * Destroy a memory descriptor.
246    *
247    * The memory descriptor need not have been finished by a call to
248    * {@link ANeuralNetworksMemoryDesc_finish}.
249    *
250    * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
251    *
252    * Available since NNAPI feature level 4.
253    *
254    * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and
255    *             results in no operation.
256    */
257   void (*ANeuralNetworksMemoryDesc_free)(ANeuralNetworksMemoryDesc *desc);
258 
259   /**
260    * Specify that a memory object will be playing the role of an input to an execution created from a
261    * particular compilation.
262    *
263    * The compilation and the input index fully specify an input operand. This function
264    * may be invoked multiple times on the same memory descriptor with different input operands,
265    * and the same input operand may be specified on multiple memory descriptors. However,
266    * specifying the same input operand on the same memory descriptor more than once will
267    * return an error.
268    *
269    * The dimensions of the corresponding model operands of all the roles specified by
270    * {@link ANeuralNetworksMemoryDesc_addInputRole} and
271    * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
272    * dimensions are incompatible if both ranks are fully specified but have different values, or if
273    * there is at least one axis that is fully specified in both but has different values.
274    *
275    * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
276    * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor
277    * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
278    *
279    * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
280    * called will return an error.
281    *
282    * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
283    *
284    * Available since NNAPI feature level 4.
285    *
286    * @param desc The memory descriptor to be modified.
287    * @param compilation The compilation object. It must already have been finished by calling
288    *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
289    *                    descriptor.
290    * @param index The index of the input argument we are referencing from the compilation. It is
291    *              an index into the inputs list passed to
292    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
293    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
294    * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
295    *                  memory is to be used in the specified role. This is provided as a hint to
296    *                  optimize the case when different roles prefer different memory locations or data
297    *                  layouts.
298    *
299    * @return ANEURALNETWORKS_NO_ERROR if successful.
300    */
301   int (*ANeuralNetworksMemoryDesc_addInputRole)(ANeuralNetworksMemoryDesc *desc,
302                                                 const ANeuralNetworksCompilation *compilation, uint32_t index,
303                                                 float frequency);
304 
305   /**
306    * Specify that a memory object will be playing the role of an output to an execution created from a
307    * particular compilation.
308    *
309    * The compilation and the output index fully specify an output operand. This function
310    * may be invoked multiple times on the same memory descriptor with different output operands,
311    * and the same output operand may be specified on multiple memory descriptors. However,
312    * specifying the same output operand on the same memory descriptor object more than once will
313    * return an error.
314    *
315    * The dimensions of the corresponding model operands of all the roles specified by
316    * {@link ANeuralNetworksMemoryDesc_addInputRole} and
317    * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two
318    * dimensions are incompatible if both ranks are fully specified but have different values, or if
319    * there is at least one axis that is fully specified in both but has different values.
320    *
321    * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
322    * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor
323    * before invoking {@link ANeuralNetworksMemoryDesc_finish}.
324    *
325    * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
326    * called will return an error.
327    *
328    * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
329    *
330    * Available since NNAPI feature level 4.
331    *
332    * @param desc The memory descriptor to be modified.
333    * @param compilation The compilation object. It must already have been finished by calling
334    *                    {@link ANeuralNetworksCompilation_finish}, and must outlive the memory
335    *                    descriptor.
336    * @param index The index of the output argument we are referencing from the compilation. It is
337    *              an index into the outputs list passed to
338    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
339    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
340    * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the
341    *                  memory is to be used in the specified role. This is provided as a hint to
342    *                  optimize the case when multiple roles prefer different memory locations or data
343    *                  layouts.
344    *
345    * @return ANEURALNETWORKS_NO_ERROR if successful.
346    */
347   int (*ANeuralNetworksMemoryDesc_addOutputRole)(ANeuralNetworksMemoryDesc *desc,
348                                                  const ANeuralNetworksCompilation *compilation, uint32_t index,
349                                                  float frequency);
350 
351   /**
352    * Set the dimensional information of the memory descriptor.
353    *
354    * The specified dimensions must be compatible with the dimensions of the corresponding model
355    * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
356    * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks
357    * are fully specified but have different values, or if there is at least one axis that is fully
358    * specified in both but has different values.
359    *
360    * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been
361    * called will return an error.
362    *
363    * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
364    *
365    * Available since NNAPI feature level 4.
366    *
367    * @param desc The memory descriptor to be modified.
368    * @param rank The number of dimensions. Must be 0 for scalars.
369    * @param dimensions An array of dimensions. An entry with the value 0 indicates that the
370    *                   corresponding axis has an unknown size.
371    *
372    * @return ANEURALNETWORKS_NO_ERROR if successful.
373    */
374   int (*ANeuralNetworksMemoryDesc_setDimensions)(ANeuralNetworksMemoryDesc *desc, uint32_t rank,
375                                                  const uint32_t *dimensions);
376 
377   /**
378    * Indicate that we have finished modifying a memory descriptor. Required before calling
379    * {@link ANeuralNetworksMemory_createFromDesc}.
380    *
381    * This function must only be called once for a given memory descriptor.
382    *
383    * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
384    *
385    * Available since NNAPI feature level 4.
386    *
387    * @param desc The memory descriptor to be finished.
388    *
389    * @return ANEURALNETWORKS_NO_ERROR if successful.
390    */
391   int (*ANeuralNetworksMemoryDesc_finish)(ANeuralNetworksMemoryDesc *desc);
392 
393   /**
394    * Creates a memory object from a memory descriptor.
395    *
396    * The memory object is created with an uninitialized buffer. A memory object with an uninitialized
397    * buffer may only be used according to the roles specified by {@link
398    * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link
399    * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object
400    * is used as an output in a successful execution, or used as the destination memory in a successful
401    * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used
402    * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or
403    * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will
404    * return to the uninitialized state if the memory object is used as an output in a failed
405    * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}.
406    *
407    * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding
408    * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and
409    * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to
410    * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have
411    * unspecified dimensions or rank. In such a case, the same memory object may be used with different
412    * shapes of outputs in different executions. When the memory is used as an input, the input shape
413    * must be the same as the output shape from the last execution using this memory object as an
414    * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the
415    * destination memory. Creating a memory object with unspecified dimensions or rank may fail for
416    * certain sets of roles.
417    *
418    * Using the memory in roles or shapes that are not compatible with the rules specified above will
419    * return an error.
420    *
421    * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
422    * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
423    * both offset and length must be set to zero and the entire memory region will be
424    * associated with the specified input or output operand.
425    *
426    * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this
427    * function will return an error.
428    *
429    * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed.
430    *
431    * Attempting to create memory from an unfinished memory descriptor will return an error.
432    *
433    * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}
434    * object.
435    *
436    * Available since NNAPI feature level 4.
437    *
438    * @param desc The memory descriptor.
439    * @param memory The memory object to be created.
440    *               Set to NULL if unsuccessful.
441    *
442    * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is
443    *         created with unspecified dimensions or rank and it is not supported for this set of
444    *         roles.
445    */
446   int (*ANeuralNetworksMemory_createFromDesc)(const ANeuralNetworksMemoryDesc *desc, ANeuralNetworksMemory **memory);
447 
448   /**
449    * Copies data from one memory object to another.
450    *
451    * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc},
452    * the src and dst must have the same logical size:
453    * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created
454    *   from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
455    *   AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
456    * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a
457    *   format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is
458    *   no padding and the data is tightly packed. This function may fail if the AHardwareBuffer
459    *   cannot be accessed.
460    * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size
461    *   equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This
462    *   function will fail if the number of elements is unknown.
463    *
464    * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have
465    * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but
466    * have different values, or if there is at least one axis that is fully specified in both but has
467    * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions
468    * of dst will get updated according to the dimensions of the src.
469    *
470    * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must
471    * have been used as an output in a successful execution, or used as the destination memory in a
472    * successful {@link ANeuralNetworksMemory_copy}.
473    *
474    * The src and dst may have different data layout, in which case the data copying is performed
475    * logically with data layout transformation.
476    *
477    * Available since NNAPI feature level 4.
478    *
479    * @param src The source memory object.
480    * @param dst The destination memory object.
481    *
482    * @return ANEURALNETWORKS_NO_ERROR if successful.
483    */
484   int (*ANeuralNetworksMemory_copy)(const ANeuralNetworksMemory *src, const ANeuralNetworksMemory *dst);
485 
486   /**
487    * Get the number of available devices.
488    *
489    * @param numDevices Used to return the number of devices.
490    *
491    * @return ANEURALNETWORKS_NO_ERROR if successful.
492    *
493    * Available since NNAPI feature level 3.
494    */
495   int (*ANeuralNetworks_getDeviceCount)(uint32_t *numDevices);
496 
497   /**
498    * Get the representation of the specified device.
499    *
500    * @param devIndex The index of the specified device. Must be less than the
501                    number of available devices.
502    * @param device The representation of the specified device.
503    *               The same representation will always be returned for the specified
504    *               device.
505    *
506    * @return ANEURALNETWORKS_NO_ERROR if successful.
507    *
508    * Available since NNAPI feature level 3.
509    */
510   int (*ANeuralNetworks_getDevice)(uint32_t devIndex, ANeuralNetworksDevice **device);
511 
512   /**
513    * Get the name of the specified device.
514    *
515    * @param device The representation of the specified device.
516    * @param name   The returned name of the specified device. The name will be in UTF-8
517    *               and will be null-terminated. It will be recognizable as a known device name
518    *               rather than a cryptic string. For devices with feature level reported by
519    *               {@link ANeuralNetworksDevice_getFeatureLevel} that is
520    *               {@link ANEURALNETWORKS_FEATURE_LEVEL_3} and higher, the format of the name is
521    *               {VENDOR}-{DEVICE}. For devices with feature level
522    *               {@link ANEURALNETWORKS_FEATURE_LEVEL_2} or lower, the format of the name is
523    *               undefined. The name will remain valid for the duration of the application.
524    *
525    * @return ANEURALNETWORKS_NO_ERROR if successful.
526    *
527    * Available since NNAPI feature level 3.
528    */
529   int (*ANeuralNetworksDevice_getName)(const ANeuralNetworksDevice *device, const char **name);
530 
531   /**
532    * Get the type of a given device.
533    *
534    * The device type can be used to help application developers to distribute Machine Learning
535    * workloads and other workloads such as graphical rendering.
536    * E.g., for an app which renders AR scenes based on real time object detection results,
537    * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU
538    * for graphical rendering.
539    *
540    * @param device The representation of the specified device.
541    * @param type The returned {@link DeviceTypeCode} of the specified device.
542    *
543    * @return ANEURALNETWORKS_NO_ERROR if successful.
544    *
545    * Available since NNAPI feature level 3.
546    */
547   int (*ANeuralNetworksDevice_getType)(const ANeuralNetworksDevice *device, int32_t *type);
548 
549   /**
550    * Get the version of the driver implementation of the specified device.
551    *
552    * It’s the responsibility of the driver implementor to insure that this version string
553    * uniquely distinguishes this implementation from all previous implementations.
554    *
555    * This version string must not be confused with the feature level which is solely defined
556    * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions.
557    * For example, it is not possible to filter all drivers older than a certain version.
558    *
559    * Application developers may use this version string to avoid or prefer specific driver
560    * implementations. For example, an application may want to do so because:
561    *     - A specific version of the driver does not provide the required performance,
562    *       perhaps because of a performance regression.
563    *     - A specific version of the driver has a bug or returns results that don’t match
564    *       the minimum precision requirement for the application.
565    *
566    * @param device The representation of the specified device.
567    * @param version The returned version string of the driver for the specified device. The
568    *                string will be in UTF-8 and will be null-terminated. For devices with feature
569    *                level 28 or lower, "UNKNOWN" will be returned. The version string will remain
570    *                valid for the duration of the application.
571    *
572    * @return ANEURALNETWORKS_NO_ERROR if successful.
573    *
574    * Available since NNAPI feature level 3.
575    */
576   int (*ANeuralNetworksDevice_getVersion)(const ANeuralNetworksDevice *device, const char **version);
577 
578   /**
579    * Get the NNAPI feature level of the specified NNAPI device.
580    *
581    * Each device has a supported feature level, which is the most advanced NNAPI specification
582    * and features this driver implements. For example, if the driver implements the features
583    * introduced in {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, but does not implement the features
584    * introduced after {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, the value would be
585    * {@link ANEURALNETWORKS_FEATURE_LEVEL_2}. Developers could decide whether or not the specified
586    * device should be used for a model that has certain feature requirements.
587    *
588    * NNAPI device feature level is closely related to NNAPI runtime feature level
589    * ({@link ANeuralNetworks_getRuntimeFeatureLevel}), which indicates an NNAPI runtime feature
590    * level (the most advanced NNAPI specification and features that the runtime implements).
591    * An NNAPI device feature level is always less than or equal to the runtime feature level.
592    *
593    * This function produces a {@link FeatureLevelCode} enum value, NOT an Android API level.
594    *
595    * @param device The representation of the specified device.
596    * @param featureLevel {@link FeatureLevelCode} of the most advanced feature this driver implements.
597    *
598    * @return ANEURALNETWORKS_NO_ERROR if successful.
599    *
600    * Available since NNAPI feature level 3.
601    */
602   int (*ANeuralNetworksDevice_getFeatureLevel)(const ANeuralNetworksDevice *device, int64_t *featureLevel);
603 
604   /**
605    * Wait until the device is in a live state.
606    *
607    * A device may encounter internal errors and temporarily enter a dead state. A
608    * call that uses a device in such a state will return with the error
609    * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until
610    * the device is in a live state.
611    *
612    * @param device The representation of the specified device.
613    *
614    * @return ANEURALNETWORKS_NO_ERROR if successful.
615    *
616    * Available since NNAPI feature level 4.
617    */
618   int (*ANeuralNetworksDevice_wait)(const ANeuralNetworksDevice *device);
619 
620   /**
621    * Get the supported operations for a specified set of devices. If multiple devices
622    * are selected, the supported operation list is a union of supported operations of all
623    * selected devices.
624    *
625    * @param model The model to be queried.
626    * @param devices The set of devices. Must not contain duplicates.
627    * @param numDevices The number of devices in the set.
628    * @param supportedOps The boolean array to be filled. True means supported. The size of the
629    *                     boolean array must be at least as large as the number of operations
630    *                     in the model. The order of elements in the supportedOps array matches
631    *                     the order in which the corresponding operations were added to the model.
632    *
633    * @return ANEURALNETWORKS_NO_ERROR if successful.
634    *
635    * Available since NNAPI feature level 3.
636    */
637   int (*ANeuralNetworksModel_getSupportedOperationsForDevices)(const ANeuralNetworksModel *model,
638                                                                const ANeuralNetworksDevice *const *devices,
639                                                                uint32_t numDevices, bool *supportedOps);
640 
641   /**
642    * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set
643    * of devices. If more than one device is specified, the compilation will
644    * distribute the workload automatically across the devices. The model must be fully
645    * supported by the specified set of devices. This means that
646    * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every
647    * operation for that model/devices pair.
648    *
649    * The user must handle all compilation and execution failures from the
650    * specified set of devices. This is in contrast to a use of {@link
651    * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover
652    * from such failures.
653    *
654    * The model passed to this function is termed the "main model" of the
655    * compilation, to distinguish it from other models referred to by an Operand
656    * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
657    *
658    * @param model The {@link ANeuralNetworksModel} to be compiled.
659    * @param devices The set of devices. Must not contain duplicates.
660    * @param numDevices The number of devices in the set.
661    * @param compilation The newly created object or NULL if unsuccessful.
662    *
663    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
664    *         if the model is invalid.
665    *
666    * Available since NNAPI feature level 3.
667    */
668   int (*ANeuralNetworksCompilation_createForDevices)(ANeuralNetworksModel *model,
669                                                      const ANeuralNetworksDevice *const *devices, uint32_t numDevices,
670                                                      ANeuralNetworksCompilation **compilation);
671 
672   /**
673    * Sets the compilation caching signature and the cache directory.
674    *
675    * Provides optional caching information to the runtime for faster repeated
676    * compilation.
677    *
678    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
679    *
680    * @param compilation The compilation to be modified.
681    * @param cacheDir The cache directory for the runtime to store and retrieve caching
682    *                 data. It is recommended to use the code cache directory provided
683    *                 by the Android runtime. If not using the code cache directory, the
684    *                 user should choose a directory local to the application, and is
685    *                 responsible for managing the cache entries.
686    * @param token The token provided by the user to identify a model; it must be of length
687    *              ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that
688    *              the token is unique to a model within the application. The NNAPI
689    *              runtime cannot detect token collisions; a collision will result in a
690    *              failed execution or in a successful execution that produces incorrect
691    *              output values.
692    *
693    * @return ANEURALNETWORKS_NO_ERROR if successful.
694    *
695    * Available since NNAPI feature level 3.
696    */
697   int (*ANeuralNetworksCompilation_setCaching)(ANeuralNetworksCompilation *compilation, const char *cacheDir,
698                                                const uint8_t *token);
699 
700   /**
701    * Schedule synchronous evaluation of the execution.
702    *
703    * <p>Schedules synchronous evaluation of the execution. Returns once the
704    * execution has completed and the outputs are ready to be consumed.
705    * </p>
706    *
707    * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
708    * and the execution is not able to complete before the timeout duration is
709    * exceeded, then execution may be aborted, in which case
710    * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. If the device has
711    * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel}
712    * that is lower than 30, then the timeout duration hint will be ignored.
713    *
714    * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
715    * the condition model does not output false within the loop timeout duration,
716    * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
717    * will be returned.
718    *
719    * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
720    * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
721    * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
722    * the execution is in the completed state.
723    *
724    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
725    *
726    * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
727    * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
728    * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
729    * asynchronous execution with dependencies.
730    *
731    * Available since NNAPI feature level 3.
732    *
733    * @param execution The execution to be scheduled and executed.
734    *
735    * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
736    *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
737    *         be properly mapped.
738    */
739   int (*ANeuralNetworksExecution_compute)(ANeuralNetworksExecution *execution);
740 
741   /**
742    * Get the dimensional information of the specified output operand of the model of the
743    * latest computation evaluated on {@link ANeuralNetworksExecution}.
744    *
745    * This function may only be invoked when the execution is in the completed state.
746    *
747    * See {@link ANeuralNetworksExecution} for information on execution states.
748    *
749    * @param execution The execution to be queried.
750    * @param index The index of the output argument we are querying. It is
751    *              an index into the lists passed to
752    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
753    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
754    * @param rank The rank of the output operand.
755    *
756    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
757    *         if the target output is provided an insufficient buffer at execution time,
758    *         ANEURALNETWORKS_BAD_DATA if the index is invalid.
759    *
760    * Available since NNAPI feature level 3.
761    */
762   int (*ANeuralNetworksExecution_getOutputOperandRank)(ANeuralNetworksExecution *execution, int32_t index,
763                                                        uint32_t *rank);
764 
765   /**
766    * Get the dimensional information of the specified output operand of the model of the
767    * latest computation evaluated on {@link ANeuralNetworksExecution}. The target output operand
768    * cannot be a scalar.
769    *
770    * This function may only be invoked when the execution is in the completed state.
771    *
772    * See {@link ANeuralNetworksExecution} for information on execution states.
773    *
774    * @param execution The execution to be queried.
775    * @param index The index of the output argument we are querying. It is an index into the lists
776    *              passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
777    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
778    * @param dimensions The dimension array to be filled. The size of the array must be exactly as
779    *                   large as the rank of the output operand to be queried in the model.
780    *
781    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE
782    *         if the target output is provided an insufficient buffer at execution time,
783    *         ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar.
784    *
785    * Available since NNAPI feature level 3.
786    */
787   int (*ANeuralNetworksExecution_getOutputOperandDimensions)(ANeuralNetworksExecution *execution, int32_t index,
788                                                              uint32_t *dimensions);
789 
790   /**
791    * Create a {@link ANeuralNetworksBurst} to apply the given compilation.
792    * This only creates the burst object. Computation is only performed once
793    * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid
794    * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.
795    *
796    * <p>The provided compilation must outlive the burst object.</p>
797    *
798    * Available since NNAPI feature level 3.
799    *
800    * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
801    * @param burst The newly created object or NULL if unsuccessful.
802    *
803    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
804    *         if the compilation is invalid.
805    */
806   int (*ANeuralNetworksBurst_create)(ANeuralNetworksCompilation *compilation, ANeuralNetworksBurst **burst);
807 
808   /**
809    * Destroys the burst object.
810    *
811    * Available since NNAPI feature level 3.
812    *
813    * @param burst The burst object to be destroyed. Passing NULL is acceptable and
814    *              results in no operation.
815    */
816   void (*ANeuralNetworksBurst_free)(ANeuralNetworksBurst *burst);
817 
818   /**
819    * Schedule synchronous evaluation of the execution on a burst object.
820    *
821    * <p>Schedules synchronous evaluation of the execution. Returns once the
822    * execution has completed and the outputs are ready to be consumed.</p>
823    *
824    * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution,
825    * and the execution is not able to complete before the timeout duration is
826    * exceeded, then execution may be aborted, in which case
827    * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
828    *
829    * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
830    * the condition model does not output false within the loop timeout duration,
831    * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
832    * will be returned. If the device has a feature level reported by
833    * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
834    * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will be ignored.
835    *
836    * <p>There must be at most one {@link ANeuralNetworksExecution} processing at
837    * any given time for any given burst object. Any
838    * {@link ANeuralNetworksExecution} launched before the previous has finished
839    * will result in ANEURALNETWORKS_BAD_STATE.</p>
840    *
841    * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
842    * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
843    * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
844    * the execution is in the completed state.
845    *
846    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
847    *
848    * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
849    * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
850    * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
851    * asynchronous execution with dependencies.
852    *
853    * Available since NNAPI feature level 3.
854    *
855    * @param burst The burst object to execute on.
856    * @param execution The execution to be scheduled and executed. The execution
857    *                  must be created from the same {@link
858    *                  ANeuralNetworksCompilation} as the burst object.
859    *
860    * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
861    */
862   int (*ANeuralNetworksExecution_burstCompute)(ANeuralNetworksExecution *execution, ANeuralNetworksBurst *burst);
863 
864   /**
865    * Creates a shared memory object from an AHardwareBuffer handle.
866    *
867    * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB
868    * format, it can be used the same way as shared memory created from a file handle. See
869    * {@link ANeuralNetworksMemory} for a description on how to use this shared memory.
870    *
871    * If the shared memory is backed by an AHardwareBuffer of a format other than
872    * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for model inputs and outputs.
873    * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
874    * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both
875    * offset and length must be set to zero and the entire memory region will be
876    * associated with the specified input or output operand. There is no guarantee
877    * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination
878    * can be used by arbitrary devices. The execution will fail if the selected set of
879    * devices cannot consume the buffer.
880    *
881    * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory
882    * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
883    * disallowed.
884    *
885    * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object.
886    *
887    * Available since NNAPI feature level 3.
888    *
889    * @param ahwb The AHardwareBuffer handle.
890    * @param memory The memory object to be created.
891    *               Set to NULL if unsuccessful.
892    *
893    * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
894    *
895    * @see AHardwareBuffer
896    */
897   int (*ANeuralNetworksMemory_createFromAHardwareBuffer)(const AHardwareBuffer *ahwb, ANeuralNetworksMemory **memory);
898 
899   /**
900    * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
901    * measured. Evaluation of the execution must not have been scheduled.
902    *
903    * By default, duration is not measured.
904    *
905    * The {@link ANeuralNetworksExecution} must have been created from an
906    * {@link ANeuralNetworksCompilation} which in turn was created from
907    * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
908    * If the device has a feature level reported by
909    * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
910    * {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, then the duration will not be measured.
911    *
912    * This function may only be invoked when the execution is in the preparation state.
913    *
914    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
915    *
916    * Available since NNAPI feature level 3.
917    *
918    * @param execution The execution to be modified.
919    * @param measure 'true' if duration is to be measured, 'false' if not.
920    *
921    * @return ANEURALNETWORKS_NO_ERROR if successful.
922    */
923   int (*ANeuralNetworksExecution_setMeasureTiming)(ANeuralNetworksExecution *execution, bool measure);
924 
925   /**
926    * Get the time spent in the latest computation evaluated on the specified
927    * {@link ANeuralNetworksExecution}, in nanoseconds.
928    *
929    * This function may only be invoked when the execution is in the completed state.
930    *
931    * See {@link ANeuralNetworksExecution} for information on execution states.
932    *
933    * @param execution The execution to be queried.
934    * @param durationCode The measurement to be queried, specified by {@link DurationCode}.
935    * @param duration The returned duration. If no measurement was requested by
936    *                 {@link ANeuralNetworksExecution_setMeasureTiming}, if the
937    *                 device has a feature level reported by
938    *                 {@link ANeuralNetworksDevice_getFeatureLevel} that is lower
939    *                 than {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, or for some other
940    *                 reason the duration is not available, UINT64_MAX will be returned.
941    *                 A particular device need not support any given measurement.
942    *
943    * @return ANEURALNETWORKS_NO_ERROR if successful.
944    *
945    * Available since NNAPI feature level 3.
946    */
947   int (*ANeuralNetworksExecution_getDuration)(const ANeuralNetworksExecution *execution, int32_t durationCode,
948                                               uint64_t *duration);
949 
950   /**
951    * Creates a shared memory object from a file descriptor.
952    *
953    * The shared memory is backed by a file descriptor via mmap.
954    * See {@link ANeuralNetworksMemory} for a description on how to use
955    * this shared memory.
956    *
957    * Available since NNAPI feature level 1.
958    *
959    * @param size The requested size in bytes.
960    *             Must not be larger than the file size.
961    * @param protect The desired memory protection for the mapping.
962    *             It is either PROT_NONE or the bitwise OR of one or
963    *             more of the following flags: PROT_READ, PROT_WRITE.
964    * @param fd The requested file descriptor.
965    *           The file descriptor has to be mmap-able. The file
966    *           descriptor will be duplicated.
967    * @param offset The offset to the beginning of the file of the area to map.
968    *               The offset has to be aligned to a page size.
969    * @param memory The memory object to be created.
970    *               Set to NULL if unsuccessful.
971    *
972    * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
973    */
974   int (*ANeuralNetworksMemory_createFromFd)(size_t size, int protect, int fd, size_t offset,
975                                             ANeuralNetworksMemory **memory);
976 
977   /**
978    * Delete a memory object.
979    *
980    * Destroys the object used by the run time to keep track of the memory.
981    * This will free the underlying actual memory if no other code has open
982    * handles to this memory.
983    *
984    * Available since NNAPI feature level 1.
985    *
986    * @param memory The memory object to be freed. Passing NULL is acceptable and
987    *               results in no operation.
988    */
989   void (*ANeuralNetworksMemory_free)(ANeuralNetworksMemory *memory);
990 
991   /**
992    * Create an empty {@link ANeuralNetworksModel}.
993    *
994    * <p>This only creates the object. Computation is performed once
995    * {@link ANeuralNetworksExecution_burstCompute},
996    * {@link ANeuralNetworksExecution_compute},
997    * {@link ANeuralNetworksExecution_startCompute} or
998    * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
999    *
1000    * The model should be constructed with calls to
1001    * {@link ANeuralNetworksModel_addOperation} and
1002    * {@link ANeuralNetworksModel_addOperand}
1003    *
1004    * <p>{@link ANeuralNetworksModel_finish} should be called once the model
1005    * has been fully constructed.</p>
1006    *
1007    * <p>{@link ANeuralNetworksModel_free} should be called once the model
1008    * is no longer needed.</p>
1009    *
1010    * Available since NNAPI feature level 1.
1011    *
1012    * @param model The {@link ANeuralNetworksModel} to be created.
1013    *              Set to NULL if unsuccessful.
1014    *
1015    * @return ANEURALNETWORKS_NO_ERROR if successful.
1016    */
1017   int (*ANeuralNetworksModel_create)(ANeuralNetworksModel **model);
1018 
1019   /**
1020    * Destroy a model.
1021    *
1022    * The model need not have been finished by a call to
1023    * {@link ANeuralNetworksModel_finish}.
1024    *
1025    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1026    *
1027    * Available since NNAPI feature level 1.
1028    *
1029    * @param model The model to be destroyed. Passing NULL is acceptable and
1030    *              results in no operation.
1031    */
1032   void (*ANeuralNetworksModel_free)(ANeuralNetworksModel *model);
1033 
1034   /**
1035    * Indicate that we have finished modifying a model. Required before
1036    * calling {@link ANeuralNetworksCompilation_create} and
1037    * {@link ANeuralNetworksCompilation_createForDevices}.
1038    *
1039    * An application must ensure that no other thread uses the model at the same
1040    * time.
1041    *
1042    * This function must only be called once for a given model.
1043    *
1044    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1045    *
1046    * Available since NNAPI feature level 1.
1047    *
1048    * @param model The model to be finished.
1049    *
1050    * @return ANEURALNETWORKS_NO_ERROR if successful.
1051    */
1052   int (*ANeuralNetworksModel_finish)(ANeuralNetworksModel *model);
1053 
1054   /**
1055    * Add an operand to a model.
1056    *
1057    * The order in which the operands are added is important. The first one added
1058    * to a model will have the index value 0, the second 1, etc. These indexes are
1059    * used as operand identifiers in
1060    * {@link ANeuralNetworksModel_addOperation},
1061    * {@link ANeuralNetworksModel_identifyInputsAndOutputs},
1062    * {@link ANeuralNetworksModel_setOperandValue},
1063    * {@link ANeuralNetworksModel_setOperandValueFromMemory},
1064    * {@link ANeuralNetworksExecution_setInput},
1065    * {@link ANeuralNetworksExecution_setInputFromMemory},
1066    * {@link ANeuralNetworksExecution_setOutput}, and
1067    * {@link ANeuralNetworksExecution_setOutputFromMemory}.
1068    *
1069    * <p>Every operand must be referenced in exactly one of the following
1070    * ways:<ul>
1071    *    <li>It is identified as a model input with
1072    *        {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
1073    *    <li>It is identified as a constant with
1074    *        {@link ANeuralNetworksModel_setOperandValue} or
1075    *        {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
1076    *    <li>It is identified as an output of exactly one operation with
1077    *        {@link ANeuralNetworksModel_addOperation}.</li>
1078    *    </ul></p>
1079    * <p>An operand that is identified as a model input or as a constant
1080    * must not also be identified as a model output with
1081    * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p>
1082    *
1083    * To build a model that can accommodate inputs of various sizes, as
1084    * you may want to do for a CNN, leave unspecified the dimensions that
1085    * will vary at run time.  If you do so, fully specify dimensions
1086    * when calling {@link ANeuralNetworksExecution_setInput} or
1087    * {@link ANeuralNetworksExecution_setInputFromMemory}.
1088    *
1089    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1090    * called will return an error.
1091    *
1092    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1093    *
1094    * Available since NNAPI feature level 1.
1095    *
1096    * @param model The model to be modified.
1097    * @param type The {@link ANeuralNetworksOperandType} that describes the shape
1098    *             of the operand.  Neither the {@link ANeuralNetworksOperandType}
1099    *             nor the dimensions it points to need to outlive the call to
1100    *             {@link ANeuralNetworksModel_addOperand}.
1101    *
1102    * @return ANEURALNETWORKS_NO_ERROR if successful.
1103    */
1104   int (*ANeuralNetworksModel_addOperand)(ANeuralNetworksModel *model, const ANeuralNetworksOperandType *type);
1105 
1106   /**
1107    * Sets an operand to a constant value.
1108    *
1109    * Values of length smaller or equal to
1110    * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES
1111    * are immediately copied into the model.
1112    *
1113    * For values of length greater than
1114    * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, a pointer to
1115    * the buffer is stored within the model. The application must not change the
1116    * content of this region until all executions using this model have
1117    * completed. As the data may be copied during processing, modifying the data
1118    * after this call yields undefined results. The provided buffer must outlive
1119    * this model.
1120    *
1121    * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
1122    * is likely to be more efficient.
1123    *
1124    * To indicate that an optional operand should be considered missing,
1125    * pass nullptr for buffer and 0 for length.
1126    *
1127    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1128    * called will return an error.
1129    *
1130    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1131    *
1132    * Available since NNAPI feature level 1.
1133    *
1134    * @param model The model to be modified.
1135    * @param index The index of the model operand we're setting.
1136    * @param buffer A pointer to the data to use.
1137    * @param length The size in bytes of the data value.
1138    *
1139    * @return ANEURALNETWORKS_NO_ERROR if successful.
1140    */
1141   int (*ANeuralNetworksModel_setOperandValue)(ANeuralNetworksModel *model, int32_t index, const void *buffer,
1142                                               size_t length);
1143 
1144   /**
1145    * Sets an operand's per channel quantization parameters.
1146    *
1147    * Sets parameters required by a tensor of type
1148    * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
1149    * This function must be called for every tensor of type
1150    * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
1151    * calling {@link ANeuralNetworksModel_finish}.
1152    *
1153    * Available since NNAPI feature level 3.
1154    *
1155    * @param model The model to be modified.
1156    * @param index The index of the model operand we're setting.
1157    * @param channelQuant The per channel quantization parameters for the operand.
1158    *                    No memory in this struct needs to outlive the call to
1159    *                    this function.
1160    *
1161    * @return ANEURALNETWORKS_NO_ERROR if successful.
1162    */
1163   int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams)(
1164     ANeuralNetworksModel *model, int32_t index, const ANeuralNetworksSymmPerChannelQuantParams *channelQuant);
1165 
1166   /**
1167    * Sets an operand to a value stored in a memory object.
1168    *
1169    * The content of the memory is not copied. A reference to that memory is stored
1170    * inside the model. The application must not change the content of the memory
1171    * region until all executions using this model have completed.  As the data may
1172    * be copied during processing, modifying the data after this call yields
1173    * undefined results.
1174    *
1175    * <p>The provided memory must outlive this model.</p>
1176    *
1177    * To indicate that an optional operand should be considered missing,
1178    * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
1179    *
1180    * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer
1181    * of a format other than AHARDWAREBUFFER_FORMAT_BLOB.
1182    *
1183    * It is disallowed to set an operand value with memory created from
1184    * {@link ANeuralNetworksMemory_createFromDesc}.
1185    *
1186    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1187    * called will return an error.
1188    *
1189    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1190    * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1191    * AHardwareBuffer usage.
1192    *
1193    * Available since NNAPI feature level 1.
1194    *
1195    * @param model The model to be modified.
1196    * @param index The index of the model operand we're setting.
1197    * @param memory The memory containing the data.
1198    * @param offset This specifies the location of the data within the memory.
1199    *               The offset is in bytes from the start of memory.
1200    * @param length The size in bytes of the data value.
1201    *
1202    * @return ANEURALNETWORKS_NO_ERROR if successful.
1203    */
1204   int (*ANeuralNetworksModel_setOperandValueFromMemory)(ANeuralNetworksModel *model, int32_t index,
1205                                                         const ANeuralNetworksMemory *memory, size_t offset,
1206                                                         size_t length);
1207 
1208   /**
1209    * Sets an operand to a value that is a reference to another NNAPI model.
1210    *
1211    * The referenced model must already have been finished by a call to
1212    * {@link ANeuralNetworksModel_finish}.
1213    *
1214    * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of
1215    * referenced models is overridden by that setting of the main model of a
1216    * compilation.
1217    *
1218    * The referenced model must outlive the model referring to it.
1219    *
1220    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
1221    * been called will return an error.
1222    *
1223    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1224    *
1225    * Available since NNAPI feature level 4.
1226    *
1227    * @param model The model to be modified.
1228    * @param index The index of the model operand we're setting.
1229    * @param value The model to be referenced.
1230    *
1231    * @return ANEURALNETWORKS_NO_ERROR if successful.
1232    */
1233   int (*ANeuralNetworksModel_setOperandValueFromModel)(ANeuralNetworksModel *model, int32_t index,
1234                                                        const ANeuralNetworksModel *value);
1235 
1236   /**
1237    * Add an operation to a model.
1238    *
1239    * @param model The model to be modified.
1240    * @param type The {@link ANeuralNetworksOperationType} of the operation.
1241    * @param inputCount The number of entries in the inputs array.
1242    * @param inputs An array of indexes identifying each operand.
1243    * @param outputCount The number of entries in the outputs array.
1244    * @param outputs An array of indexes identifying each operand.
1245    *
1246    * The operands specified by inputs and outputs must have been
1247    * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
1248    *
1249    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1250    * called will return an error.
1251    *
1252    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1253    *
1254    * Available since NNAPI feature level 1.
1255    *
1256    * @return ANEURALNETWORKS_NO_ERROR if successful.
1257    */
1258   int (*ANeuralNetworksModel_addOperation)(ANeuralNetworksModel *model, ANeuralNetworksOperationType type,
1259                                            uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
1260                                            const uint32_t *outputs);
1261 
1262   /**
1263    * Specifies which operands will be the model's inputs and
1264    * outputs. Every model must have at least one input and one output.
1265    *
1266    * An operand cannot be used for both input and output. Doing so will
1267    * return an error.
1268    *
1269    * @param model The model to be modified.
1270    * @param inputCount The number of entries in the inputs array.
1271    * @param inputs An array of indexes identifying the input operands.
1272    * @param outputCount The number of entries in the outputs array.
1273    * @param outputs An array of indexes identifying the output operands.
1274    *
1275    * The operands specified by inputs and outputs must have been
1276    * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
1277    *
1278    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1279    * called will return an error.
1280    *
1281    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1282    *
1283    * Available since NNAPI feature level 1.
1284    *
1284    * @return ANEURALNETWORKS_NO_ERROR if successful.
1285    */
1286   int (*ANeuralNetworksModel_identifyInputsAndOutputs)(ANeuralNetworksModel *model, uint32_t inputCount,
1287                                                        const uint32_t *inputs, uint32_t outputCount,
1288                                                        const uint32_t *outputs);
1289 
1290   /**
1291    * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
1292    * calculated with range and/or precision as low as that of the IEEE 754 16-bit
1293    * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1294    * must be calculated using at least the range and precision of the IEEE 754
1295    * 32-bit floating-point format.
1296    *
1297    * The relaxComputationFloat32toFloat16 setting of the main model of
1298    * a compilation overrides the values of the referenced models.
1299    *
1300    * @param model The model to be modified.
1301    * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
1302    *              calculated with range and/or precision as low as that of the
1303    *              IEEE 754 16-bit floating point format. 'false' indicates
1304    *              {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
1305    *              at least the range and precision of the IEEE 754 32-bit floating
1306    *              point format.
1307    *
1308    * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
1309    * called will return an error.
1310    *
1311    * Available since NNAPI feature level 2.
1312    *
1313    * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1314    */
1315   int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16)(ANeuralNetworksModel *model, bool allow);
1316 
1317   /**
1318    * Create a {@link ANeuralNetworksCompilation} to compile the given model.
1319    *
1320    * The model passed to this function is termed the "main model" of the
1321    * compilation, to distinguish it from other models referred to by an Operand
1322    * of type {@link ANEURALNETWORKS_MODEL} within this compilation.
1323    *
1324    * <p>This function only creates the object. Compilation is only performed once
1325    * {@link ANeuralNetworksCompilation_finish} is invoked.</p>
1326    *
1327    * <p>{@link ANeuralNetworksCompilation_finish} should be called once
1328    * all desired properties have been set on the compilation.</p>
1329    *
1330    * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation
1331    * is no longer needed.</p>
1332    *
1333    * <p>The provided model must outlive the compilation.</p>
1334    *
1335    * The model must already have been finished by a call to
1336    * {@link ANeuralNetworksModel_finish}.
1337    *
1338    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1339    *
1340    * Available since NNAPI feature level 1.
1341    *
1342    * @param model The {@link ANeuralNetworksModel} to be compiled.
1343    * @param compilation The newly created object or NULL if unsuccessful.
1344    *
1345    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
1346    *         if the model is invalid.
1347    */
1348   int (*ANeuralNetworksCompilation_create)(ANeuralNetworksModel *model, ANeuralNetworksCompilation **compilation);
1349 
1350   /**
1351    * Destroy a compilation.
1352    *
1353    * The compilation need not have been finished by a call to
1354    * {@link ANeuralNetworksCompilation_finish}.
1355    *
1356    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1357    *
1358    * Available since NNAPI feature level 1.
1359    *
1360    * @param compilation The compilation to be destroyed. Passing NULL is acceptable and
1361    *                    results in no operation.
1362    */
1363   void (*ANeuralNetworksCompilation_free)(ANeuralNetworksCompilation *compilation);
1364 
1365   /**
1366    * Sets the execution preference.
1367    *
1368    * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime
1369    * uses PREFER_FAST_SINGLE_ANSWER</p>
1370    *
1371    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1372    *
1373    * Available since NNAPI feature level 1.
1374    *
1375    * @param compilation The compilation to be modified.
1376    * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER},
1377    *                  {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or
1378    *                  {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}.
1379    *
1380    * @return ANEURALNETWORKS_NO_ERROR if successful.
1381    */
1382   int (*ANeuralNetworksCompilation_setPreference)(ANeuralNetworksCompilation *compilation, int32_t preference);
1383 
1384   /**
1385    * Indicate that we have finished modifying a compilation. Required before
1386    * calling {@link ANeuralNetworksBurst_create} or
1387    * {@link ANeuralNetworksExecution_create}.
1388    *
1389    * An application must ensure that no other thread uses the compilation at the
1390    * same time.
1391    *
1392    * This function must only be called once for a given compilation.
1393    *
1394    * If {@link ANeuralNetworksCompilation_setTimeout} was called on this
1395    * compilation, and the compilation is not able to be finished before the
1396    * timeout duration is exceeded, then compilation may be aborted, in which case
1397    * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned.
1398    *
1399    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1400    *
1401    * Available since NNAPI feature level 1.
1402    *
1403    * @param compilation The compilation to be finished.
1404    *
1405    * @return ANEURALNETWORKS_NO_ERROR if successful.
1406    */
1407   int (*ANeuralNetworksCompilation_finish)(ANeuralNetworksCompilation *compilation);
1408 
1409   /**
1410    * Set the execution priority.
1411    *
1412    * Execution priorities are relative to other executions created by the same
1413    * application (specifically same uid) for the same device. Specifically,
1414    * priorities of executions from one application will not affect executions from
1415    * another application. Similarly, priorities of executions on one device will
1416    * not affect executions on another device.
1417    *
1418    * Higher priority executions may use more compute resources than lower priority
1419    * executions, and may preempt or starve lower priority executions.
1420    *
1421    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1422    *
1423    * Available since NNAPI feature level 4.
1424    *
1425    * @param compilation The compilation to be modified.
1426    * @param priority The relative priority of the execution compared to other
1427    *     executions created by the application. Must be one of
1428    *     ANEURALNETWORKS_PRIORITY_*.
1429    *
1430    * @return ANEURALNETWORKS_NO_ERROR if successful.
1431    */
1432   int (*ANeuralNetworksCompilation_setPriority)(ANeuralNetworksCompilation *compilation, int priority);
1433 
1434   /**
1435    * Set the maximum expected duration for compiling the model.
1436    *
1437    * If the device is not able to complete the compilation within the specified
1438    * duration, the compilation may be aborted. The timeout duration begins at the
1439    * call to {@link ANeuralNetworksCompilation_finish}.
1440    *
1441    * This timeout duration acts as a hint to drivers, and can be used to both free
1442    * up compute resources within the driver and return control back to the
1443    * application quicker than is possible without the hint. It enables drivers
1444    * that are able to estimate how long a compilation will take to abort the
1445    * compilation before it has even started if the driver believes the compilation
1446    * cannot be completed within the timeout duration. Similarly, it enables
1447    * drivers to abort an ongoing compilation if it is taking too long. However,
1448    * this call does not guarantee that the compilation will complete or abort
1449    * within the timeout duration.
1450    *
1451    * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called),
1452    * the timeout duration for compiling the model is considered infinite.
1453    *
1454    * The {@link ANeuralNetworksCompilation} must have been created with
1455    * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
1456    * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
1457    * device has a feature level reported by
1458    * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
1459    * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
1460    * be ignored.
1461    *
1462    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
1463    *
1464    * @param compilation The compilation to be modified.
1465    * @param duration The maximum amount of time in nanoseconds that is expected to
1466    *     be spent finishing a compilation. If this duration is exceeded, the
1467    *     compilation may be aborted. If set to 0, the timeout duration is
1468    *     considered infinite.
1469    *
1470    * @return ANEURALNETWORKS_NO_ERROR if successful.
1471    *
1472    * Available since NNAPI feature level 4.
1473    */
1474   int (*ANeuralNetworksCompilation_setTimeout)(ANeuralNetworksCompilation *compilation, uint64_t duration);
1475 
1476   /**
1477    * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
1478    * This only creates the object. Computation is only performed once
1479    * {@link ANeuralNetworksExecution_burstCompute},
1480    * {@link ANeuralNetworksExecution_compute},
1481    * {@link ANeuralNetworksExecution_startCompute} or
1482    * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked.
1483    *
1484    * <p>The provided compilation must outlive the execution.</p>
1485    *
1486    * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1487    *
1488    * Available since NNAPI feature level 1.
1489    *
1490    * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
1491    * @param execution The newly created object or NULL if unsuccessful.
1492    *
1493    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
1494    *         if the compilation is invalid.
1495    */
1496   int (*ANeuralNetworksExecution_create)(ANeuralNetworksCompilation *compilation, ANeuralNetworksExecution **execution);
1497 
1498   /**
1499    * Destroy an execution.
1500    *
1501    * <p>The execution need not have been scheduled by a call to
1502    * {@link ANeuralNetworksExecution_burstCompute},
1503    * {@link ANeuralNetworksExecution_compute},
1504    * {@link ANeuralNetworksExecution_startCompute} or
1505    * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled,
1506    * then the application must not call {@link ANeuralNetworksExecution_free}
1507    * until the execution has completed (i.e.,
1508    * {@link ANeuralNetworksExecution_burstCompute},
1509    * {@link ANeuralNetworksExecution_compute}, or
1510    * {@link ANeuralNetworksEvent_wait} has returned).
1511    *
1512    * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1513    *
1514    * Available since NNAPI feature level 1.
1515    *
1516    * @param execution The execution to be destroyed. Passing NULL is acceptable and
1517    *                  results in no operation.
1518    */
1519   void (*ANeuralNetworksExecution_free)(ANeuralNetworksExecution *execution);
1520 
1521   /**
1522    * Associate a user buffer with an input of the model of the
1523    * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1524    * been scheduled. Once evaluation of the execution has been scheduled, the
1525    * application must not change the content of the buffer until the execution has
1526    * completed. Evaluation of the execution will not change the content of the
1527    * buffer.
1528    *
1529    * <p>The provided buffer must outlive the execution.</p>
1530    *
1531    * If the input is optional, you can indicate that it is omitted by
1532    * passing nullptr for buffer and 0 for length.
1533    *
1534    * Otherwise, if the user has not set the execution to accept padded input buffers by
1535    * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1536    * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
1537    * number of elements). Passing a length argument with value not equal to the raw size of the input
1538    * will result in ANEURALNETWORKS_BAD_DATA.
1539    *
1540    * Otherwise, if the user has set the execution to accept padded input buffers by calling
1541    * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1542    * than the raw size of the input, and the extra bytes at the end of the buffer may be used
1543    * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1544    * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
1545    *
1546    * This function may only be invoked when the execution is in the preparation state.
1547    *
1548    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1549    * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
1550    * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
1551    * preferred buffer alignment and padding, to improve performance.
1552    *
1553    * Available since NNAPI feature level 1.
1554    *
1555    * @param execution The execution to be modified.
1556    * @param index The index of the input argument we are setting. It is
1557    *              an index into the lists passed to
1558    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1559    *              the index associated with
1560    *              {@link ANeuralNetworksModel_addOperand}.
1561    * @param type The {@link ANeuralNetworksOperandType} of the
1562    *             operand. Unless the input is omitted, this should be
1563    *             used to specify the dimensions that were left
1564    *             unspecified when the operand was added to the
1565    *             model. All other properties of the type must be the
1566    *             same as specified in the model. If the type is the same
1567    *             as specified when the model was built, NULL can be
1568    *             passed. Neither the {@link ANeuralNetworksOperandType}
1569    *             nor the dimensions it points to need to outlive the call
1570    *             to {@link ANeuralNetworksExecution_setInput}.
1571    * @param buffer The buffer containing the data.
1572    * @param length The size of the data value in bytes plus any end padding.
1573    *
1574    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
1575    *         name is not recognized or the buffer is too small for the input.
1576    */
1577   int (*ANeuralNetworksExecution_setInput)(ANeuralNetworksExecution *execution, int32_t index,
1578                                            const ANeuralNetworksOperandType *type, const void *buffer, size_t length);
1579 
1580   /**
1581    * Associate a region of a memory object with an input of the model of the
1582    * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1583    * been scheduled. Once evaluation of the execution has been scheduled, the
1584    * application must not change the content of the region until the execution has
1585    * completed. Evaluation of the execution will not change the content of the
1586    * region.
1587    *
1588    * <p>The provided memory must outlive the execution.</p>
1589    *
1590    * If the input is optional, you can indicate that it is omitted by
1591    * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for
1592    * buffer and 0 for length.
1593    *
1594    * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
1595    * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
1596    * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
1597    * the whole memory is used.
1598    *
1599    * Otherwise, if the user has not set the execution to accept padded input memory objects by
1600    * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1601    * must be equal to the raw size of the input (i.e. the size of an element multiplied by the
1602    * number of elements). Passing a length argument with value not equal to the raw size of the input
1603    * will result in ANEURALNETWORKS_BAD_DATA.
1604    *
1605    * Otherwise, if the user has set the execution to accept padded input memory objects by calling
1606    * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1607    * than the raw size of the input, and the extra bytes at the end of the memory region may be used
1608    * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1609    * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA.
1610    *
1611    * This function may only be invoked when the execution is in the preparation state.
1612    *
1613    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1614    * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1615    * AHardwareBuffer usage.
1616    * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
1617    * created from memory descriptors.
1618    * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and
1619    * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting
1620    * preferred memory alignment and padding, to improve performance.
1621    *
1622    * Available since NNAPI feature level 1.
1623    *
1624    * @param execution The execution to be modified.
1625    * @param index The index of the input argument we are setting. It is
1626    *              an index into the lists passed to
1627    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1628    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1629    * @param type The {@link ANeuralNetworksOperandType} of the
1630    *             operand. This should be used to specify the dimensions
1631    *             that were left unspecified when the operand was added
1632    *             to the model. All other properties of the type must be
1633    *             the same as specified in the model. If the type is the
1634    *             same as specified when the model was built, NULL can be
1635    *             passed. Neither the {@link ANeuralNetworksOperandType}
1636    *             nor the dimensions it points to need to outlive the call
1637    *             to {@link ANeuralNetworksExecution_setInputFromMemory}.
1638    * @param memory The memory containing the data.
1639    * @param offset This specifies the location of the data within the memory.
1640    *               The offset is in bytes from the start of memory.
1641    * @param length The size of the data value in bytes plus any end padding.
1642    *
1643    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
1644    *         name is not recognized or the buffer is too small for the input.
1645    */
1646   int (*ANeuralNetworksExecution_setInputFromMemory)(ANeuralNetworksExecution *execution, int32_t index,
1647                                                      const ANeuralNetworksOperandType *type,
1648                                                      const ANeuralNetworksMemory *memory, size_t offset, size_t length);
1649 
1650   /**
1651    * Associate a user buffer with an output of the model of the
1652    * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1653    * been scheduled. Once evaluation of the execution has been scheduled, the
1654    * application must not change the content of the buffer until the execution has
1655    * completed.
1656    *
1657    * <p>The provided buffer must outlive the execution.</p>
1658    *
1659    * If the output is optional, you can indicate that it is omitted by
1660    * passing nullptr for buffer and 0 for length.
1661    *
1662    * Otherwise, if the user has not set the execution to accept padded output buffers by
1663    * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1664    * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
1665    * number of elements). Passing a length argument with value not equal to the raw size of the output
1666    * will result in ANEURALNETWORKS_BAD_DATA.
1667    *
1668    * Otherwise, if the user has set the execution to accept padded output buffers by calling
1669    * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1670    * than the raw size of the output, and the extra bytes at the end of the buffer may be used
1671    * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1672    * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
1673    *
1674    * This function may only be invoked when the execution is in the preparation state.
1675    *
1676    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1677    * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
1678    * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
1679    * preferred buffer alignment and padding, to improve performance.
1680    *
1681    * Available since NNAPI feature level 1.
1682    *
1683    * @param execution The execution to be modified.
1684    * @param index The index of the output argument we are setting. It is
1685    *              an index into the lists passed to
1686    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1687    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1688    * @param type The {@link ANeuralNetworksOperandType} of the
1689    *             operand. Unless the output is omitted, this should be
1690    *             used to specify the dimensions that were left
1691    *             unspecified when the operand was added to the
1692    *             model. All other properties of the type must be the
1693    *             same as specified in the model. If the type is the same
1694    *             as specified when the model was built, NULL can be
1695    *             passed. Neither the {@link ANeuralNetworksOperandType}
1696    *             nor the dimensions it points to need to outlive the call
1697    *             to {@link ANeuralNetworksExecution_setOutput}.
1698    *             Since NNAPI feature level 3, the output operand can have unspecified
1699    *             dimensions or rank to be deduced dynamically during the execution.
1700    *             However, the user must provide a large enough buffer. The user
1701    *             can retrieve the output dimensional information after the execution
1702    *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
1703    *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
1704    * @param buffer The buffer where the data is to be written.
1705    * @param length The size of the data value in bytes plus any end padding.
1706    *
1707    * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
1708    *         name is not recognized or the buffer is too small for the output.
1709    */
1710   int (*ANeuralNetworksExecution_setOutput)(ANeuralNetworksExecution *execution, int32_t index,
1711                                             const ANeuralNetworksOperandType *type, void *buffer, size_t length);
1712 
1713   /**
1714    * Associate a region of a memory object with an output of the model of the
1715    * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
1716    * been scheduled. Once evaluation of the execution has been scheduled, the
1717    * application must not change the content of the region until the execution has
1718    * completed.
1719    *
1720    * <p>The provided memory must outlive the execution.</p>
1721    *
1722    * If the output is optional, you can indicate that it is omitted by
1723    * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
1724    * buffer and 0 for length.
1725    *
1726    * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created
1727    * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created
1728    * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating
1729    * the whole memory is used.
1730    *
1731    * Otherwise, if the user has not set the execution to accept padded output memory objects by
1732    * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument
1733    * must be equal to the raw size of the output (i.e. the size of an element multiplied by the
1734    * number of elements). Passing a length argument with value not equal to the raw size of the output
1735    * will result in ANEURALNETWORKS_BAD_DATA.
1736    *
1737    * Otherwise, if the user has set the execution to accept padded output memory objects by calling
1738    * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater
1739    * than the raw size of the output, and the extra bytes at the end of the memory region may be used
1740    * by the driver to access data in chunks, for efficiency. Passing a length argument with value
1741    * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA.
1742    *
1743    * This function may only be invoked when the execution is in the preparation state.
1744    *
1745    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1746    * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on
1747    * AHardwareBuffer usage.
1748    * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects
1749    * created from memory descriptors.
1750    * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and
1751    * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting
1752    * preferred memory alignment and padding, to improve performance.
1753    *
1754    * Available since NNAPI feature level 1.
1755    *
1756    * @param execution The execution to be modified.
1757    * @param index The index of the output argument we are setting. It is
1758    *              an index into the lists passed to
1759    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1760    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
1761    * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be
1762    *             used to specify the dimensions that were left
1763    *             unspecified when the operand was added to the
1764    *             model. All other properties of the type must be the
1765    *             same as specified in the model. If the type is the same
1766    *             as specified when the model was built, NULL can be
1767    *             passed. Neither the {@link ANeuralNetworksOperandType}
1768    *             nor the dimensions it points to need to outlive the call
1769    *             to {@link ANeuralNetworksExecution_setOutputFromMemory}.
1770    *             Since NNAPI feature level 3, the output operand can have unspecified
1771    *             dimensions or rank to be deduced dynamically during the execution.
1772    *             However, the user must provide a large enough memory. The user
1773    *             can retrieve the output dimensional information after the execution
1774    *             by {@link ANeuralNetworksExecution_getOutputOperandRank} and
1775    *             {@link ANeuralNetworksExecution_getOutputOperandDimensions}.
1776    * @param memory The memory where the data is to be stored.
1777    * @param offset This specifies the location of the data within the memory.
1778    *               The offset is in bytes from the start of memory.
1779    * @param length The size of the data value in bytes plus any end padding.
1780    *
   * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the
   *         index is not recognized or the memory region is too small for the output.
1783    */
1784   int (*ANeuralNetworksExecution_setOutputFromMemory)(ANeuralNetworksExecution *execution, int32_t index,
1785                                                       const ANeuralNetworksOperandType *type,
1786                                                       const ANeuralNetworksMemory *memory, size_t offset,
1787                                                       size_t length);
1788 
1789   /**
1790    * Schedule asynchronous evaluation of the execution.
1791    *
1792    * <p>Schedules asynchronous evaluation of the execution. Once the execution
1793    * has completed and the outputs are ready to be consumed, the returned event
1794    * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that
1795    * event.
1796    * </p>
1797    *
1798    * ANeuralNetworksEvent_wait must be called to recuperate the resources used
1799    * by the execution.
1800    *
1801    * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution,
1802    * and the execution is not able to complete before the timeout duration is
1803    * exceeded, then execution may be aborted, in which case
1804    * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned through
1805    * {@link ANeuralNetworksExecution_startCompute} or
1806    * {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
1807    * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
1808    * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout
1809    * duration hint will be ignored.
1810    *
1811    * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
1812    * the condition model does not output false within the loop timeout duration,
1813    * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
1814    * will be returned through {@link ANeuralNetworksEvent_wait} on the event
1815    * object.
1816    *
1817    * If the device can detect before the execution has started that the execution
1818    * will not complete within the timeout duration, the device may choose to skip
1819    * the execution and instead return ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}.
1820    *
1821    * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
1822    * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
1823    * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
1824    * the execution is in the completed state.
1825    *
1826    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1827    *
1828    * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
1829    * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
1830    * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for
1831    * asynchronous execution with dependencies.
1832    *
1833    * Available since NNAPI feature level 1.
1834    *
1835    * @param execution The execution to be scheduled and executed.
1836    * @param event The event that will be signaled on completion. event is set to
1837    *              NULL if there's an error.
1838    *
1839    * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
1840    */
1841   int (*ANeuralNetworksExecution_startCompute)(ANeuralNetworksExecution *execution, ANeuralNetworksEvent **event);
1842 
1843   /**
1844    * Set the maximum expected duration of the specified execution.
1845    *
1846    * If the device is not able to complete the execution within the specified
1847    * duration, the execution may be aborted. The timeout duration begins at a
1848    * call to one of:
1849    * - {@link ANeuralNetworksExecution_burstCompute}
1850    * - {@link ANeuralNetworksExecution_compute}
1851    * - {@link ANeuralNetworksExecution_startCompute}
1852    * - {@link ANeuralNetworksExecution_startComputeWithDependencies}
1853    *
1854    * This timeout duration acts as a hint to drivers, and can be used to both free
1855    * up compute resources within the driver and return control back to the
1856    * application quicker than is possible without the hint. It enables drivers
1857    * that are able to estimate how long an execution will take to abort the
1858    * execution before it has even started if the driver believes the execution
1859    * cannot be completed within the timeout duration. Similarly, it enables
1860    * drivers to abort an ongoing execution if it is taking too long. However, this
1861    * call does not guarantee that the execution will complete or abort within the
1862    * timeout duration.
1863    *
1864    * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called),
1865    * the timeout duration for execution is considered infinite.
1866    *
1867    * The {@link ANeuralNetworksExecution} must have been created from an
1868    * {@link ANeuralNetworksCompilation} which in turn was created from
1869    * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
1870    * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the
1871    * device has a feature level reported by
1872    * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than
1873    * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will
1874    * be ignored.
1875    *
1876    * This function may only be invoked when the execution is in the preparation state.
1877    *
1878    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1879    *
1880    * @param execution The execution to be modified.
1881    * @param duration The maximum amount of time in nanoseconds that is expected to
1882    *     be spent executing a model. If this duration is exceeded, the execution
1883    *     may be aborted. If set to 0, the timeout duration is considered infinite.
1884    *
1885    * @return ANEURALNETWORKS_NO_ERROR if successful.
1886    *
1887    * Available since NNAPI feature level 4.
1888    */
1889   int (*ANeuralNetworksExecution_setTimeout)(ANeuralNetworksExecution *execution, uint64_t duration);
1890 
1891   /**
1892    * Set the maximum duration of WHILE loops in the specified execution.
1893    *
1894    * This is a fuzzy per-loop timeout intended to prevent infinite loops.
1895    *
1896    * If a WHILE loop condition model does not output false within the specified
1897    * duration, the execution will be aborted.
1898    *
1899    * See {@link ANeuralNetworks_getDefaultLoopTimeout} and
1900    * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default
1901    * and maximum timeout values.
1902    *
1903    * This function may only be invoked when the execution is in the preparation state.
1904    *
1905    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1906    *
1907    * @param execution The execution to be modified.
1908    * @param duration The maximum amount of time in nanoseconds that can be spent
1909    *     executing a WHILE loop. If the specified duration value exceeds the value
1910    *     produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be
1911    *     overridden by that value.
1912    *
1913    * @return ANEURALNETWORKS_NO_ERROR if successful.
1914    *         ANEURALNETWORKS_BAD_STATE if execution has started.
1915    *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
1916    *
1917    * Available since NNAPI feature level 4.
1918    */
1919   int (*ANeuralNetworksExecution_setLoopTimeout)(ANeuralNetworksExecution *execution, uint64_t duration);
1920 
  /**
   * Get the default timeout value for WHILE loops.
   *
   * This is the per-loop timeout that applies to an execution when
   * {@link ANeuralNetworksExecution_setLoopTimeout} has not been called on it.
   *
   * @return The default timeout value in nanoseconds.
   *
   * Available since NNAPI feature level 4.
   */
  uint64_t (*ANeuralNetworks_getDefaultLoopTimeout)();
1929 
  /**
   * Get the maximum timeout value for WHILE loops.
   *
   * A duration passed to {@link ANeuralNetworksExecution_setLoopTimeout} that
   * exceeds this value is overridden by (clamped to) this value.
   *
   * @return The maximum timeout value in nanoseconds.
   *
   * Available since NNAPI feature level 4.
   */
  uint64_t (*ANeuralNetworks_getMaximumLoopTimeout)();
1938 
1939   /**
1940    * Waits until the execution completes.
1941    *
1942    * More than one thread can wait on an event. When the execution completes,
1943    * all threads will be released.
1944    *
1945    * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution
1946    * corresponding to this event, and the execution is not able to complete
1947    * before the duration is exceeded, the execution may be aborted, in which case
1948    * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned here.
1949    *
1950    * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
1951    * the condition model does not output false within the loop timeout duration,
1952    * the execution will be aborted, and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
1953    * will be returned here.
1954    *
1955    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
1956    *
1957    * Available since NNAPI feature level 1.
1958    *
1959    * @param event The event that will be signaled on completion.
1960    * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
1961    *         ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot
1962    *         be properly mapped.
1963    */
1964   int (*ANeuralNetworksEvent_wait)(ANeuralNetworksEvent *event);
1965 
  /**
   * Destroys the event.
   *
   * Events obtained from {@link ANeuralNetworksExecution_startCompute},
   * {@link ANeuralNetworksExecution_startComputeWithDependencies}, or
   * {@link ANeuralNetworksEvent_createFromSyncFenceFd} should be destroyed with
   * this function once they are no longer needed.
   *
   * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   *
   * Available since NNAPI feature level 1.
   *
   * @param event The event object to be destroyed. Passing NULL is acceptable and
   *              results in no operation.
   */
  void (*ANeuralNetworksEvent_free)(ANeuralNetworksEvent *event);
1977 
  /**
   * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor.
   *
   * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd,
   * it will instead dup the provided sync_fence_fd and own the duplicate.
   *
   * The created event must be released with {@link ANeuralNetworksEvent_free}
   * when it is no longer needed.
   *
   * @param sync_fence_fd The sync_fence file descriptor.
   * @param event The newly created object or NULL if unsuccessful.
   *
   * @return ANEURALNETWORKS_NO_ERROR if successful.
   *
   * Available since NNAPI feature level 4.
   */
  int (*ANeuralNetworksEvent_createFromSyncFenceFd)(int sync_fence_fd, ANeuralNetworksEvent **event);
1992 
1993   /**
1994    * Get sync_fence file descriptor from the event.
1995    *
1996    * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd
1997    * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned.
1998    *
1999    * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and
2000    * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create
2001    * an event backed by a sync fence.
2002    *
2003    * The user takes ownership of the returned fd, and must close the returned file descriptor when
2004    * it is no longer needed.
2005    *
2006    * @param event An event that is backed by a sync fence.
2007    * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will
2008    *                      be set to -1 if there is an error.
2009    *
2010    * @return ANEURALNETWORKS_NO_ERROR if successful.
2011    *
2012    * Available since NNAPI feature level 4.
2013    */
2014   int (*ANeuralNetworksEvent_getSyncFenceFd)(const ANeuralNetworksEvent *event, int *sync_fence_fd);
2015 
2016   /**
2017    * Schedule asynchronous evaluation of the execution with dependencies.
2018    *
2019    * The execution will wait for all the depending events to be signaled before
2020    * starting the evaluation. Once the execution has completed and the outputs
2021    * are ready to be consumed, the returned event will be signaled. Depending on which
2022    * devices are handling the execution, the event could be backed by a sync fence.
2023    * Use {@link ANeuralNetworksEvent_wait} to wait for that event.
2024    *
   * ANeuralNetworksEvent_wait must be called to recuperate the resources used
2026    * by the execution.
2027    *
2028    * If parts of the execution are scheduled on devices that do not support fenced execution,
2029    * the function call may wait for such parts to finish before returning.
2030    *
2031    * The function will return an error if any of the events in dependencies is already in a bad
2032    * state. After the execution is scheduled, if any of the events in dependencies does not complete
2033    * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned
2034    * event will return an error.
2035    *
2036    * The function will return an error if any of the execution outputs has a tensor operand type
2037    * that is not fully specified.
2038    *
2039    * The function can be passed a timeout duration in nanoseconds. This timeout
2040    * duration acts as a hint to drivers in the same way that the timeout durations
2041    * in {@link ANeuralNetworksCompilation_setTimeout} and {@link
2042    * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration
2043    * begins when all waitFor sync fences have been signaled, and can be used
2044    * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the
2045    * maximum timeout duration beginning at the call to
2046    * {@link ANeuralNetworksExecution_startComputeWithDependencies}.
2047    * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created
2048    * from an {@link ANeuralNetworksCompilation} which in turn was created from
2049    * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1,
2050    * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either
2051    * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the
2052    * timeout duration passed to this call is exceeded, the execution may be
2053    * aborted, in which case ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be
2054    * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies}
2055    * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a
2056    * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that
2057    * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration
2058    * hints will be ignored.
2059    *
2060    * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and
2061    * the condition model does not output false within the loop timeout duration,
2062    * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}
2063    * will be returned through {@link ANeuralNetworksEvent_wait} on the event
2064    * object.
2065    *
2066    * Before NNAPI feature level 5, this function may only be invoked when the execution is in the
2067    * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be
2068    * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when
2069    * the execution is in the completed state.
2070    *
2071    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2072    *
2073    * See {@link ANeuralNetworksExecution_compute} for synchronous execution.
2074    * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution.
2075    * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution.
2076    *
2077    * @param execution The execution to be scheduled and executed.
2078    * @param dependencies A set of depending events. The actual evaluation will not start
2079    *                     until all the events are signaled.
2080    * @param num_dependencies The number of events in the dependencies set.
2081    * @param duration The maximum amount of time in nanoseconds that is expected to
2082    *                 be spent executing the model after all dependencies are
2083    *                 signaled. If set to 0, the timeout duration is considered
2084    *                 infinite.
2085    * @param event The event that will be signaled on completion. event is set to
2086    *              NULL if there's an error.
2087    *
2088    * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled.
2089    *
2090    * Available since NNAPI feature level 4.
2091    */
2092   int (*ANeuralNetworksExecution_startComputeWithDependencies)(ANeuralNetworksExecution *execution,
2093                                                                const ANeuralNetworksEvent *const *dependencies,
2094                                                                uint32_t num_dependencies, uint64_t duration,
2095                                                                ANeuralNetworksEvent **event);
2096 
2097   /**
2098    * Get the NNAPI runtime feature level.
2099    *
2100    * Since API level 31 (NNAPI feature level 5), the NNAPI runtime (libneuralnetworks.so) and its
2101    * API specification can be updated between Android API releases.
2102    *
2103    * On Android devices with API level 31 and newer, for NNAPI runtime feature discovery,
2104    * the NNAPI runtime feature level must be used instead of the Android device API level.
2105    *
2106    * On Android devices with API level 30 and older, the Android API level of the Android
2107    * device must be used for NNAPI runtime feature discovery. Enum values in
2108    * {@link FeatureLevelCode} from feature level 1 to 5 have their corresponding Android
2109    * API levels listed in their documentation, and each such enum value equals the corresponding
2110    * API level. This allows using the Android API level as the feature level.
2111    * This mapping between enum value and Android API level does not exist for feature levels
2112    * after NNAPI feature level 5 and API levels after S (31).
2113    *
2114    * Example usage:
2115    * int device_api_level = android_get_device_api_level();
2116    * int64_t runtime_feature_level = (device_api_level < __ANDROID_API_S__) ?
2117    *                                  device_api_level : ANeuralNetworks_getRuntimeFeatureLevel();
2118    *
2119    * Runtime feature level is closely related to NNAPI device feature level
2120    * ({@link ANeuralNetworksDevice_getFeatureLevel}), which indicates an NNAPI device feature level
2121    * (the most advanced NNAPI specification and features that the driver implements).
2122    * This function expresses NNAPI runtime feature level, which indicates the most advanced
2123    * NNAPI specification and features the runtime implements. An NNAPI device feature level is
2124    * always less than or equal to the runtime feature level.
2125    *
2126    * This function returns a {@link FeatureLevelCode} enum value,
2127    * which is the NNAPI specification version that this NNAPI runtime implements.
2128    * It is NOT an Android API level.
2129    *
2130    * Available since NNAPI feature level 5.
2131    */
2132   int64_t (*ANeuralNetworks_getRuntimeFeatureLevel)();
2133 
2134   /**
2135    * Specifies whether the {@link ANeuralNetworksExecution} is able to accept padded input and output
2136    * buffers and memory objects.
2137    *
2138    * By default, the input and output buffers and memory objects of {@link ANeuralNetworksExecution}
2139    * do not allow padding.
2140    *
2141    * Setting the execution to accept padded input and output buffers and memory objects enables the
2142    * length argument of {@link ANeuralNetworksExecution_setInput},
2143    * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2144    * and {@link ANeuralNetworksExecution_setOutputFromMemory} to be greater than the raw size of the
2145    * operand (i.e. the size of an element multiplied by the number of elements). The extra bytes
2146    * at the end of the buffer or memory region may be used by the driver to access data in chunks,
2147    * for efficiency.
2148    *
2149    * This method must not be called after {@link ANeuralNetworksExecution_setInput},
2150    * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput},
2151    * or {@link ANeuralNetworksExecution_setOutputFromMemory}.
2152    *
2153    * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
2154    *
2155    * @param execution The execution to be modified.
2156    * @param enable 'true' if the execution is to be able to accept padded input and output buffers
2157    *               and memory objects, 'false' if not.
2158    *
2159    * @return ANEURALNETWORKS_NO_ERROR if successful.
2160    *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2161    *         ANEURALNETWORKS_BAD_STATE if {@link ANeuralNetworksExecution_setInput},
2162    *         {@link ANeuralNetworksExecution_setInputFromMemory},
2163    *         {@link ANeuralNetworksExecution_setOutput}, or
2164    *         {@link ANeuralNetworksExecution_setOutputFromMemory} has been called on the execution.
2165    *
2166    * Available since NNAPI feature level 5.
2167    */
2168   int (*ANeuralNetworksExecution_enableInputAndOutputPadding)(ANeuralNetworksExecution *execution, bool enable);
2169 
2170   /**
2171    * Get the preferred buffer and memory alignment of an input to an execution created from a
2172    * particular compilation.
2173    *
2174    * The user may use the returned alignment value to guide the layout of the input buffer or memory
2175    * pool. To achieve the best performance, make sure the address of the buffer passed in
2176    * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in
2177    * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the preferred alignment
2178    * value of the same input. A driver may choose to allocate a separate buffer and do memory copying
2179    * if the provided buffer or memory does not satisfy the preferred alignment.
2180    *
2181    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2182    *
2183    * @param compilation The compilation object. It must already have been finished by calling
2184    *                    {@link ANeuralNetworksCompilation_finish}.
2185    * @param index The index of the input argument we are referencing from the compilation. It is
2186    *              an index into the inputs list passed to
2187    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2188    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2189    * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2190    *
2191    * @return ANEURALNETWORKS_NO_ERROR if successful.
2192    *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2193    *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2194    *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2195    *
2196    * Available since NNAPI feature level 5.
2197    */
2198   int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput)(const ANeuralNetworksCompilation *compilation,
2199                                                                         uint32_t index, uint32_t *alignment);
2200 
2201   /**
2202    * Get the preferred buffer and memory end padding of an input to an execution created from a
2203    * particular compilation.
2204    *
2205    * The user may use the returned padding value to guide the layout of the input buffer or memory
2206    * pool. To achieve the best performance, make sure the length value passed in
2207    * {@link ANeuralNetworksExecution_setInput} or
2208    * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of
2209    * the input (i.e. the size of an element multiplied by the number of elements) rounding up to
2210    * a multiple of the preferred padding value of the same input. A driver may choose to allocate a
2211    * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2212    * the preferred padding.
2213    *
2214    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2215    * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2216    * {@link ANeuralNetworksExecution_setInput}, and
2217    * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing
2218    * input buffer or memory padding to the driver.
2219    *
2220    * @param compilation The compilation object. It must already have been finished by calling
2221    *                    {@link ANeuralNetworksCompilation_finish}.
2222    * @param index The index of the input argument we are referencing from the compilation. It is
2223    *              an index into the inputs list passed to
2224    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2225    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2226    * @param padding The returned preferred padding in bytes. It will be a power of 2.
2227    *
2228    * @return ANEURALNETWORKS_NO_ERROR if successful.
2229    *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2230    *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2231    *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2232    *
2233    * Available since NNAPI feature level 5.
2234    */
2235   int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput)(const ANeuralNetworksCompilation *compilation,
2236                                                                       uint32_t index, uint32_t *padding);
2237 
2238   /**
2239    * Get the preferred buffer and memory alignment of an output to an execution created from a
2240    * particular compilation.
2241    *
2242    * The user may use the returned alignment value to guide the layout of the output buffer or memory
2243    * pool. To achieve the best performance, make sure the address of the buffer passed in
2244    * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in
2245    * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the preferred alignment
2246    * value of the same output. A driver may choose to allocate a separate buffer and do memory copying
2247    * if the provided buffer or memory does not satisfy the preferred alignment.
2248    *
2249    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2250    *
2251    * @param compilation The compilation object. It must already have been finished by calling
2252    *                    {@link ANeuralNetworksCompilation_finish}.
2253    * @param index The index of the output argument we are referencing from the compilation. It is
2254    *              an index into the outputs list passed to
2255    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2256    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2257    * @param alignment The returned preferred alignment in bytes. It will be a power of 2.
2258    *
2259    * @return ANEURALNETWORKS_NO_ERROR if successful.
2260    *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL.
2261    *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2262    *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2263    *
2264    * Available since NNAPI feature level 5.
2265    */
2266   int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput)(const ANeuralNetworksCompilation *compilation,
2267                                                                          uint32_t index, uint32_t *alignment);
2268 
2269   /**
2270    * Get the preferred memory end padding of an output to an execution created from a particular
2271    * compilation.
2272    *
2273    * The user may use the returned padding value to guide the layout of the output buffer or memory
2274    * pool. To achieve the best performance, make sure the length value passed in
2275    * {@link ANeuralNetworksExecution_setOutput} or
2276    * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of
2277    * the output (i.e. the size of an element multiplied by the number of elements) rounding up to
2278    * a multiple of the preferred padding value of the same output. A driver may choose to allocate a
2279    * separate buffer and do memory copying if the provided buffer or memory value does not satisfy
2280    * the preferred padding.
2281    *
2282    * See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
2283    * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding},
2284    * {@link ANeuralNetworksExecution_setOutput}, and
2285    * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing
2286    * output buffer or memory padding to the driver.
2287    *
2288    * @param compilation The compilation object. It must already have been finished by calling
2289    *                    {@link ANeuralNetworksCompilation_finish}.
2290    * @param index The index of the output argument we are referencing from the compilation. It is
2291    *              an index into the outputs list passed to
2292    *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
2293    *              the index associated with {@link ANeuralNetworksModel_addOperand}.
2294    * @param padding The returned preferred padding in bytes. It will be a power of 2.
2295    *
2296    * @return ANEURALNETWORKS_NO_ERROR if successful.
2297    *         ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL.
2298    *         ANEURALNETWORKS_BAD_STATE if the compilation has not been finished.
2299    *         ANEURALNETWORKS_BAD_DATA if the index is out of range.
2300    *
2301    * Available since NNAPI feature level 5.
2302    */
2303   int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput)(const ANeuralNetworksCompilation *compilation,
2304                                                                        uint32_t index, uint32_t *padding);
2305 
2306   /**
2307    * Specifies whether the {@link ANeuralNetworksExecution} can be reused for multiple computations.
2308    *
2309    * By default, the {@link ANeuralNetworksExecution} is not reusable.
2310    *
2311    * Setting the execution to be reusable enables multiple computations to be scheduled and evaluated
2312    * on the same execution sequentially, either by means of
2313    * {@link ANeuralNetworksExecution_burstCompute}, {@link ANeuralNetworksExecution_compute},
2314    * {@link ANeuralNetworksExecution_startCompute} or
2315    * {@link ANeuralNetworksExecution_startComputeWithDependencies}: The application may schedule and
2316    * evaluate a computation again from the completed state of a reusable execution.
2317    *
2318    * This function may only be invoked when the execution is in the preparation state.
2319    *
2320    * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage.
2321    *
2322    * @param execution The execution to be modified.
2323    * @param reusable 'true' if the execution is to be reusable, 'false' if not.
2324    *
2325    * @return ANEURALNETWORKS_NO_ERROR if successful.
2326    *         ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL.
2327    *         ANEURALNETWORKS_BAD_STATE if the execution is not in the preparation state.
2328    *
2329    * Available since NNAPI feature level 5.
2330    */
2331   int (*ANeuralNetworksExecution_setReusable)(ANeuralNetworksExecution *execution, bool reusable);
2332 };
2333 
/**
 * Returns a pointer to the NNAPI function-pointer table declared above.
 *
 * NOTE(review): the returned table is presumably a lazily-initialized,
 * process-wide instance whose entries are bound to the platform NNAPI
 * library at load time — confirm in the corresponding .cc file. Individual
 * entries may be null when the device's NNAPI feature level does not
 * provide the symbol; callers should null-check before invoking.
 */
const NNAPI *NNAPIImplementation();
2335 }  // namespace lite
2336 }  // namespace mindspore
2337 #endif  // MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_NNAPI_IMPLEMENTATION_H_
2338