/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_
#define TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_

#include <stdint.h>
#include <stdio.h>

#include <string>

typedef struct AHardwareBuffer AHardwareBuffer;

// NN API types based on the NNAPI header file:
// https://developer.android.com/ndk/reference/group/neural-networks

/**
 * Operand types.
 *
 * The type of operands that can be added to a model.
 *
 * Although we define many types, most operators accept just a few
 * types. The most commonly used are ANEURALNETWORKS_TENSOR_FLOAT32,
 * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, and ANEURALNETWORKS_INT32.
 */
enum {
  ANEURALNETWORKS_FLOAT32 = 0,
  ANEURALNETWORKS_INT32 = 1,
  ANEURALNETWORKS_UINT32 = 2,
  ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
  ANEURALNETWORKS_TENSOR_INT32 = 4,
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
  ANEURALNETWORKS_BOOL = 6,
  ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
  ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
  ANEURALNETWORKS_TENSOR_BOOL8 = 9,
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
};

/**
 * Operation types.
 *
 * The type of operations that can be added to a model.
 */
enum {
  ANEURALNETWORKS_ADD = 0,
  ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
  ANEURALNETWORKS_CONCATENATION = 2,
  ANEURALNETWORKS_CONV_2D = 3,
  ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
  ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
  ANEURALNETWORKS_DEQUANTIZE = 6,
  ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
  ANEURALNETWORKS_FLOOR = 8,
  ANEURALNETWORKS_FULLY_CONNECTED = 9,
  ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
  ANEURALNETWORKS_L2_NORMALIZATION = 11,
  ANEURALNETWORKS_L2_POOL_2D = 12,
  ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
  ANEURALNETWORKS_LOGISTIC = 14,
  ANEURALNETWORKS_LSH_PROJECTION = 15,
  ANEURALNETWORKS_LSTM = 16,
  ANEURALNETWORKS_MAX_POOL_2D = 17,
  ANEURALNETWORKS_MUL = 18,
  ANEURALNETWORKS_RELU = 19,
  ANEURALNETWORKS_RELU1 = 20,
  ANEURALNETWORKS_RELU6 = 21,
  ANEURALNETWORKS_RESHAPE = 22,
  ANEURALNETWORKS_RESIZE_BILINEAR = 23,
  ANEURALNETWORKS_RNN = 24,
  ANEURALNETWORKS_SOFTMAX = 25,
  ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
  ANEURALNETWORKS_SVDF = 27,
  ANEURALNETWORKS_TANH = 28,
  ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
  ANEURALNETWORKS_DIV = 30,
  ANEURALNETWORKS_MEAN = 31,
  ANEURALNETWORKS_PAD = 32,
  ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
  ANEURALNETWORKS_SQUEEZE = 34,
  ANEURALNETWORKS_STRIDED_SLICE = 35,
  ANEURALNETWORKS_SUB = 36,
  ANEURALNETWORKS_TRANSPOSE = 37,
  ANEURALNETWORKS_ABS = 38,
  ANEURALNETWORKS_ARGMAX = 39,
  ANEURALNETWORKS_ARGMIN = 40,
  ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
  ANEURALNETWORKS_CAST = 45,
  ANEURALNETWORKS_EQUAL = 48,
  ANEURALNETWORKS_EXP = 49,
  ANEURALNETWORKS_EXPAND_DIMS = 50,
  ANEURALNETWORKS_GATHER = 51,
  ANEURALNETWORKS_GREATER = 53,
  ANEURALNETWORKS_GREATER_EQUAL = 54,
  ANEURALNETWORKS_GROUPED_CONV_2D = 55,
  ANEURALNETWORKS_LESS = 58,
  ANEURALNETWORKS_LESS_EQUAL = 59,
  ANEURALNETWORKS_LOG = 60,
  ANEURALNETWORKS_LOGICAL_AND = 61,
  ANEURALNETWORKS_LOGICAL_NOT = 62,
  ANEURALNETWORKS_LOGICAL_OR = 63,
  ANEURALNETWORKS_LOG_SOFTMAX = 64,
  ANEURALNETWORKS_MAXIMUM = 65,
  ANEURALNETWORKS_MINIMUM = 66,
  ANEURALNETWORKS_NEG = 67,
  ANEURALNETWORKS_NOT_EQUAL = 68,
  ANEURALNETWORKS_PAD_V2 = 69,
  ANEURALNETWORKS_POW = 70,
  ANEURALNETWORKS_PRELU = 71,
  ANEURALNETWORKS_QUANTIZE = 72,
  ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
  ANEURALNETWORKS_REDUCE_ANY = 76,
  ANEURALNETWORKS_REDUCE_MAX = 77,
  ANEURALNETWORKS_REDUCE_MIN = 78,
  ANEURALNETWORKS_REDUCE_PROD = 79,
  ANEURALNETWORKS_REDUCE_SUM = 80,
  ANEURALNETWORKS_RSQRT = 83,
  ANEURALNETWORKS_SELECT = 84,
  ANEURALNETWORKS_SIN = 85,
  ANEURALNETWORKS_SLICE = 86,
  ANEURALNETWORKS_SPLIT = 87,
  ANEURALNETWORKS_SQRT = 88,
  ANEURALNETWORKS_TILE = 89,
  ANEURALNETWORKS_TOPK_V2 = 90,
  ANEURALNETWORKS_TRANSPOSE_CONV = 91,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
  ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
  ANEURALNETWORKS_QUANTIZED_LSTM = 95,
  ANEURALNETWORKS_IF = 96,
  ANEURALNETWORKS_WHILE = 97,
  ANEURALNETWORKS_ELU = 98,
  ANEURALNETWORKS_HARD_SWISH = 99,
  ANEURALNETWORKS_FILL = 100,
  ANEURALNETWORKS_RANK = 101,
  ANEURALNETWORKS_BATCH_MATMUL = 102,
  ANEURALNETWORKS_PACK = 103,
  ANEURALNETWORKS_MIRROR_PAD = 104,
  ANEURALNETWORKS_REVERSE = 105,
};

/**
 * Fused activation function types.
 */
enum {
  ANEURALNETWORKS_FUSED_NONE = 0,
  ANEURALNETWORKS_FUSED_RELU = 1,
  ANEURALNETWORKS_FUSED_RELU1 = 2,
  ANEURALNETWORKS_FUSED_RELU6 = 3,
};

/**
 * Execution preferences.
 */
enum {
  ANEURALNETWORKS_PREFER_LOW_POWER = 0,
  ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
  ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
};

/**
 * Result codes.
 */
// LINT.IfChange
enum {
  ANEURALNETWORKS_NO_ERROR = 0,
  ANEURALNETWORKS_OUT_OF_MEMORY = 1,
  ANEURALNETWORKS_INCOMPLETE = 2,
  ANEURALNETWORKS_UNEXPECTED_NULL = 3,
  ANEURALNETWORKS_BAD_DATA = 4,
  ANEURALNETWORKS_OP_FAILED = 5,
  ANEURALNETWORKS_BAD_STATE = 6,
  ANEURALNETWORKS_UNMAPPABLE = 7,
  ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
  ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
  ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
  ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
  ANEURALNETWORKS_DEAD_OBJECT = 14,
};
// LINT.ThenChange(//tensorflow/lite/delegates/nnapi/nnapi_delegate.cc:NnApiErrorDescription)

/**
 * Implicit padding algorithms.
 */
enum {
  ANEURALNETWORKS_PADDING_SAME = 1,
  ANEURALNETWORKS_PADDING_VALID = 2,
};

/**
 * Device types.
 *
 * The type of NNAPI device.
 */
enum {
  /** The device type cannot be provided. */
  ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
  /** The device does not fall into any category below. */
  ANEURALNETWORKS_DEVICE_OTHER = 1,
  /** The device runs NNAPI models on single or multi-core CPU. */
  ANEURALNETWORKS_DEVICE_CPU = 2,
  /** The device can run NNAPI models and also accelerate graphics APIs such
   * as OpenGL ES and Vulkan. */
  ANEURALNETWORKS_DEVICE_GPU = 3,
  /** Dedicated accelerator for Machine Learning workloads. */
  ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
};

/**
 * Relative execution priority.
 *
 * Available since API level 30.
 */
enum {
  ANEURALNETWORKS_PRIORITY_LOW = 90,
  ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
  ANEURALNETWORKS_PRIORITY_HIGH = 110,
  ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
};

/**
 * NNAPI feature levels.
 *
 * Each update of the NNAPI specification yields a new NNAPI feature level enum
 * value. An NNAPI feature level corresponds to an NNAPI specification version
 * that a driver and/or the NNAPI runtime can implement.
 */
enum {
  /** NNAPI specification available in Android O-MR1, Android NNAPI feature
   * level 1. */
  ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
  /** NNAPI specification available in Android P, Android NNAPI feature
   * level 2. */
  ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
  /** NNAPI specification available in Android Q, Android NNAPI feature
   * level 3. */
  ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
  /** NNAPI specification available in Android R, Android NNAPI feature
   * level 4. */
  ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
  /**
   * NNAPI specification available in Android S, Android NNAPI feature level 5.
   * After Android S, the NNAPI specification can be updated between Android
   * API releases.
   */
  ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
  /** Android NNAPI feature level 6. */
  ANEURALNETWORKS_FEATURE_LEVEL_6 = 1000006,
  /** Android NNAPI feature level 7. */
  ANEURALNETWORKS_FEATURE_LEVEL_7 = 1000007,
};

/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory
 * descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is
 * used by {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling
 *     {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc}.
 *   - Destroy the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling
 * {@link ANeuralNetworksMemoryDesc_finish}. A memory descriptor is destroyed
 * by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once
 * {@link ANeuralNetworksMemoryDesc_finish} has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no
 * other uses of the memory descriptor after calling
 * {@link ANeuralNetworksMemoryDesc_free}. It is however safe to continue
 * using a {@link ANeuralNetworksMemory} object created from the memory
 * descriptor.
 *
 * Available since API level 30.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
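
// A minimal sketch of the workflow described above, assuming the standard
// NNAPI entry points (declared in the NDK's NeuralNetworks.h, not in this
// header) are available and that `compilation` is a finished
// ANeuralNetworksCompilation; error handling is omitted for brevity.
//
//   ANeuralNetworksMemoryDesc* desc = nullptr;
//   ANeuralNetworksMemoryDesc_create(&desc);
//   // The memory will be used as input 0 of `compilation` in every execution.
//   ANeuralNetworksMemoryDesc_addInputRole(desc, compilation, /*index=*/0,
//                                          /*frequency=*/1.0f);
//   ANeuralNetworksMemoryDesc_finish(desc);
//   ANeuralNetworksMemory* memory = nullptr;
//   ANeuralNetworksMemory_createFromDesc(desc, &memory);
//   ANeuralNetworksMemoryDesc_free(desc);  // `memory` remains valid.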

/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be
 * used to create shared memory from a file handle. {@link
 * ANeuralNetworksMemory_createShared} can be used to directly create shared
 * memory.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
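
// A minimal sketch of wrapping a read-only weights file in an
// ANeuralNetworksMemory via a file descriptor, assuming the standard NNAPI
// entry points are available; the file name is hypothetical, `file_size` is
// assumed to hold the size of the region, and error handling is omitted.
//
//   #include <fcntl.h>
//   #include <sys/mman.h>
//   int fd = open("/data/local/tmp/weights.bin", O_RDONLY);
//   ANeuralNetworksMemory* memory = nullptr;
//   ANeuralNetworksMemory_createFromFd(file_size, PROT_READ, fd,
//                                      /*offset=*/0, &memory);
//   // Operand values can now be set with
//   // ANeuralNetworksModel_setOperandValueFromMemory(model, index, memory,
//   //                                                offset, length).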

/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>The model will be built by calling<ul>
 * <li>{@link ANeuralNetworksModel_create},</li>
 * <li>{@link ANeuralNetworksModel_addOperation},</li>
 * <li>{@link ANeuralNetworksModel_addOperand},</li>
 * </ul>
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has
 * returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}. This
 * includes any compilation or execution object created using the model.</p>
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
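
// A minimal sketch of building a one-operation model (float32 ADD with a
// fused ReLU), assuming the standard NNAPI entry points are available; the
// shapes are placeholders and error handling is omitted.
//
//   ANeuralNetworksModel* model = nullptr;
//   ANeuralNetworksModel_create(&model);
//   const uint32_t dims[1] = {2};
//   ANeuralNetworksOperandType tensor = {ANEURALNETWORKS_TENSOR_FLOAT32, 1,
//                                        dims, 0.0f, 0};
//   ANeuralNetworksOperandType scalar = {ANEURALNETWORKS_INT32, 0, nullptr,
//                                        0.0f, 0};
//   ANeuralNetworksModel_addOperand(model, &tensor);  // 0: input a
//   ANeuralNetworksModel_addOperand(model, &tensor);  // 1: input b
//   ANeuralNetworksModel_addOperand(model, &scalar);  // 2: fused activation
//   ANeuralNetworksModel_addOperand(model, &tensor);  // 3: output
//   const int32_t act = ANEURALNETWORKS_FUSED_RELU;
//   ANeuralNetworksModel_setOperandValue(model, 2, &act, sizeof(act));
//   const uint32_t in[] = {0, 1, 2}, out[] = {3};
//   ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, in, 1,
//                                     out);
//   const uint32_t model_in[] = {0, 1};
//   ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, model_in, 1, out);
//   ANeuralNetworksModel_finish(model);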

/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 *    <li>Create a new compilation instance by calling the
 *        {@link ANeuralNetworksCompilation_create} function.</li>
 *    <li>Optionally set preferences with
 *        {@link ANeuralNetworksCompilation_setPreference}.</li>
 *    <li>Complete the compilation with
 *        {@link ANeuralNetworksCompilation_finish}.</li>
 *    <li>Use the compilation as many times as needed with
 *        {@link ANeuralNetworksExecution_create}.</li>
 *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
 *        once all executions using the compilation have
 *        completed.</li></ul></p>
 *
 * <p>A compilation cannot be modified once
 * {@link ANeuralNetworksCompilation_finish} has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a compilation at a given time. It is however safe for more than one
 * thread to use a compilation object once
 * {@link ANeuralNetworksCompilation_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the compilation after calling
 * {@link ANeuralNetworksCompilation_free}. This includes any execution object
 * created using the compilation.</p>
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
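
// A minimal sketch of compiling a finished model, assuming the standard NNAPI
// entry points are available; error handling is omitted.
//
//   ANeuralNetworksCompilation* compilation = nullptr;
//   ANeuralNetworksCompilation_create(model, &compilation);
//   ANeuralNetworksCompilation_setPreference(
//       compilation, ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);
//   ANeuralNetworksCompilation_finish(compilation);
//   // `compilation` can now back any number of ANeuralNetworksExecution
//   // objects; free it after the last execution has completed.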

/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a
 * machine learning model to a set of inputs.
 *
 * <p>To use:<ul>
 *    <li>Create a new execution instance by calling the
 *        {@link ANeuralNetworksExecution_create} function.</li>
 *    <li>Associate data to the model inputs with
 *        {@link ANeuralNetworksExecution_setInput} or
 *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 *    <li>Associate output buffers to the model outputs with
 *        {@link ANeuralNetworksExecution_setOutput} or
 *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 *    <li>Apply the model with
 *        {@link ANeuralNetworksExecution_startCompute}.</li>
 *    <li>Wait for the execution to complete with
 *        {@link ANeuralNetworksEvent_wait}.</li>
 *    <li>Destroy the execution with
 *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An execution cannot be modified once
 * {@link ANeuralNetworksExecution_startCompute} has been called on it.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_startCompute} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the execution after calling
 * {@link ANeuralNetworksExecution_free}.</p>
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
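
// A minimal sketch of running one synchronous inference against a finished
// compilation, assuming the standard NNAPI entry points are available and
// that `input` and `output` are appropriately sized float arrays; error
// handling is omitted.
//
//   ANeuralNetworksExecution* execution = nullptr;
//   ANeuralNetworksExecution_create(compilation, &execution);
//   // Passing nullptr for the type reuses the operand type from the model.
//   ANeuralNetworksExecution_setInput(execution, 0, nullptr, input,
//                                     sizeof(input));
//   ANeuralNetworksExecution_setOutput(execution, 0, nullptr, output,
//                                      sizeof(output));
//   ANeuralNetworksExecution_compute(execution);  // Synchronous (API 29+).
//   ANeuralNetworksExecution_free(execution);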

/**
 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
 */
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
  /** The index of the channel dimension. */
  uint32_t channelDim;
  /** The size of the scale array. Should be equal to dimension[channelDim] of
   * the Operand. */
  uint32_t scaleCount;
  /** The array of scaling values for each channel. Each value must be greater
   * than zero. */
  const float* scales;
} ANeuralNetworksSymmPerChannelQuantParams;
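
// A minimal sketch of attaching per-channel scales to a per-channel quantized
// weight operand, assuming `model` and `weights_index` are defined by the
// caller; the scale values are placeholders.
//
//   static const float kScales[3] = {0.05f, 0.04f, 0.06f};
//   ANeuralNetworksSymmPerChannelQuantParams channel_quant = {
//       /*channelDim=*/0, /*scaleCount=*/3, kScales};
//   ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
//       model, weights_index, &channel_quant);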

/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
 * of a rapid sequence of executions. It will likely cause overhead if only used
 * for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of inferences
 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
 * object and the {@link ANeuralNetworksExecution} objects used with it must all
 * have been created from the same {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may choose
 * to increase the clock frequency of its accelerator for the lifetime of a
 * burst object.
 *
 * <p>To use:<ul>
 *    <li>Create a new burst object by calling the
 *        {@link ANeuralNetworksBurst_create} function.</li>
 *    <li>For each execution:</li><ul>
 *        <li>Create an {@link ANeuralNetworksExecution} and configure its
 *            properties (see {@link ANeuralNetworksExecution} for
 *            details).</li>
 *        <li>Apply the model synchronously with
 *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *            {@link ANeuralNetworksBurst} with the new
 *            {@link ANeuralNetworksExecution}.</li>
 *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 *    <li>Destroy the burst with
 *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
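
// A minimal sketch of a burst of synchronous executions over the same
// compilation, assuming the standard NNAPI entry points are available and a
// hypothetical FillInputsAndOutputs() helper configures each execution; error
// handling is omitted.
//
//   ANeuralNetworksBurst* burst = nullptr;
//   ANeuralNetworksBurst_create(compilation, &burst);
//   for (int i = 0; i < num_frames; ++i) {
//     ANeuralNetworksExecution* execution = nullptr;
//     ANeuralNetworksExecution_create(compilation, &execution);
//     FillInputsAndOutputs(execution);  // setInput/setOutput calls.
//     ANeuralNetworksExecution_burstCompute(execution, burst);
//     ANeuralNetworksExecution_free(execution);
//   }
//   ANeuralNetworksBurst_free(burst);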

/**
 * ANeuralNetworksOperandType describes the type of an operand.
 * This structure is used to describe both scalars and tensors.
 */
typedef struct ANeuralNetworksOperandType {
  /** The data type, e.g. ANEURALNETWORKS_FLOAT32. */
  int32_t type;
  /** The number of dimensions. It should be 0 for scalars. */
  uint32_t dimensionCount;
  /** The dimensions of the tensor. It should be nullptr for scalars. */
  const uint32_t* dimensions;
  /** These two fields are only used for quantized tensors.
   * They should be zero for scalars and non-fixed point tensors.
   * The dequantized value of each entry is (value - zeroPoint) * scale.
   */
  float scale;
  int32_t zeroPoint;
} ANeuralNetworksOperandType;
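
// A minimal sketch of describing a quantized 8-bit tensor operand with this
// struct; the shape, scale, and zero point shown are placeholders.
//
//   const uint32_t dims[4] = {1, 224, 224, 3};
//   ANeuralNetworksOperandType quant_tensor = {
//       ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
//       /*dimensionCount=*/4, dims,
//       /*scale=*/0.0078125f, /*zeroPoint=*/128};
//   // Real value represented by a stored byte v: (v - 128) * 0.0078125.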

/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;

typedef int32_t ANeuralNetworksOperationType;

/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the
 * corresponding device, and control which device(s) a model is to be run on.
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;

/**
 * Diagnostic result codes.
 */
typedef enum {
  ANNDIAG_NO_ERROR = 0,

  /**
   * Failure caused by failure to load support library driver.
   */
  ANNDIAG_FAILED_TO_LOAD_SL = 1,

  /**
   * Failure caused by failure to register HAL service.
   */
  ANNDIAG_FAILED_TO_REGISTER_SERVICE = 2,

  /**
   * General failure.
   */
  ANNDIAG_GENERAL_ERROR = 3,

  /**
   * Invalid argument.
   */
  ANNDIAG_INVALID_ARGUMENT = 4,
} ANeuralNetworksDiagnosticResultCode;

/**
 * Diagnostic data class.
 */
typedef enum {
  ANNDIAG_DATA_CLASS_UNKNOWN = 0,
  ANNDIAG_DATA_CLASS_OTHER = 1,
  ANNDIAG_DATA_CLASS_FLOAT32 = 2,
  ANNDIAG_DATA_CLASS_FLOAT16 = 3,
  ANNDIAG_DATA_CLASS_QUANT = 4,
  ANNDIAG_DATA_CLASS_MIXED = 5
} ANeuralNetworksDiagnosticDataClass;

/**
 * Diagnostic execution mode.
 */
typedef enum {
  ANNDIAG_EXECUTION_MODE_UNKNOWN = 0,
  ANNDIAG_EXECUTION_MODE_ASYNC = 1,
  ANNDIAG_EXECUTION_MODE_SYNC = 2,
  ANNDIAG_EXECUTION_MODE_BURST = 3,
  ANNDIAG_EXECUTION_MODE_ASYNC_WITH_DEPS = 4,
} ANeuralNetworksDiagnosticExecutionMode;

typedef struct ANeuralNetworksDiagnosticCompilationInfo
    ANeuralNetworksDiagnosticCompilationInfo;
typedef struct ANeuralNetworksDiagnosticExecutionInfo
    ANeuralNetworksDiagnosticExecutionInfo;
typedef void (*ANeuralNetworksDiagnosticCompilationFinishedCallback)(
    const void* context, const ANeuralNetworksDiagnosticCompilationInfo* info);
typedef void (*ANeuralNetworksDiagnosticExecutionFinishedCallback)(
    const void* context, const ANeuralNetworksDiagnosticExecutionInfo* info);

// NN API function types: signatures of the NNAPI entry points, used when the
// library is resolved dynamically at runtime.

typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
    size_t size, int protect, int fd, size_t offset,
    ANeuralNetworksMemory** memory);
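
// A minimal sketch of how a function type above can be used to resolve an
// NNAPI entry point at runtime with dlopen/dlsym; the handle and variable
// names are hypothetical and error handling is omitted.
//
//   #include <dlfcn.h>
//   void* handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
//   auto createFromFd =
//       reinterpret_cast<ANeuralNetworksMemory_createFromFd_fn>(
//           dlsym(handle, "ANeuralNetworksMemory_createFromFd"));
//   if (createFromFd != nullptr) {
//     // Call through the pointer exactly as if it were the NDK function.
//   }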

typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);

typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);

typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);

typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);

typedef int (*ANeuralNetworksCompilation_create_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);

typedef void (*ANeuralNetworksCompilation_free_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
    ANeuralNetworksCompilation* compilation, int32_t preference);

typedef int (*ANeuralNetworksCompilation_finish_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksModel_addOperand_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);

typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* buffer,
    size_t length);

typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksSymmPerChannelQuantParams* channelQuant);

typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksMemory* memory, size_t offset, size_t length);

typedef int (*ANeuralNetworksModel_addOperation_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
    uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
    const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
    ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
    uint32_t outputCount, const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
    ANeuralNetworksModel* model, bool allow);

typedef int (*ANeuralNetworksExecution_create_fn)(
    ANeuralNetworksCompilation* compilation,
    ANeuralNetworksExecution** execution);

typedef void (*ANeuralNetworksExecution_free_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setInput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_setOutput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_startCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);

typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);

typedef int (*ASharedMemory_create_fn)(const char* name, size_t size);

typedef int (*ANeuralNetworks_getDeviceCount_fn)(uint32_t* numDevices);

typedef int (*ANeuralNetworks_getDevice_fn)(uint32_t devIndex,
                                            ANeuralNetworksDevice** device);

typedef int (*ANeuralNetworksDevice_getName_fn)(
    const ANeuralNetworksDevice* device, const char** name);

typedef int (*ANeuralNetworksDevice_getType_fn)(
    const ANeuralNetworksDevice* device, int32_t* type);

typedef int (*ANeuralNetworksDevice_getVersion_fn)(
    const ANeuralNetworksDevice* device, const char** version);

typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(
    const ANeuralNetworksDevice* device, int64_t* featureLevel);

typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)(
    const ANeuralNetworksModel* model,
    const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
    bool* supportedOps);

typedef int (*ANeuralNetworksCompilation_createForDevices_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
    uint32_t numDevices, ANeuralNetworksCompilation** compilation);

typedef int (*ANeuralNetworksCompilation_setCaching_fn)(
    ANeuralNetworksCompilation* compilation, const char* cacheDir,
    const uint8_t* token);

typedef int (*ANeuralNetworksCompilation_setTimeout_fn)(
    ANeuralNetworksCompilation* compilation, uint64_t duration);

typedef int (*ANeuralNetworksCompilation_setPriority_fn)(
    ANeuralNetworksCompilation* compilation, int priority);

typedef int (*ANeuralNetworksExecution_compute_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_setLoopTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* rank);

typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* dimensions);

typedef int (*ANeuralNetworksBurst_create_fn)(
    ANeuralNetworksCompilation* compilation, ANeuralNetworksBurst** burst);

typedef void (*ANeuralNetworksBurst_free_fn)(ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksExecution_burstCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksMemory_createFromAHardwareBuffer_fn)(
    const AHardwareBuffer* ahwb, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(
    ANeuralNetworksExecution* execution, bool measure);

typedef enum {
  // Execution time on hardware (not driver, which runs on host processor).
  ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
  // Execution time in driver (including time on hardware). Excludes overhead
  // such as that of the runtime itself and the IPC needed for the runtime to
  // communicate with the driver.
  ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
  // Execution time on hardware, after all dependencies have been signaled.
  // If no dependencies were specified (for example, if the execution was
  // scheduled other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
  // Execution time in driver, after all dependencies have been signaled.
  // Excludes overhead such as that of the runtime itself and the IPC needed
  // for the runtime to communicate with the driver.
  // If no dependencies were specified (for example, if the execution was
  // scheduled other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;

typedef int (*ANeuralNetworksExecution_getDuration_fn)(
    const ANeuralNetworksExecution* execution, int32_t durationCode,
    uint64_t* duration);
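
// A minimal sketch of measuring execution time with these duration codes,
// assuming the standard NNAPI entry points are available; timing must be
// requested before the execution starts, and error handling is omitted.
//
//   ANeuralNetworksExecution_setMeasureTiming(execution, /*measure=*/true);
//   ANeuralNetworksExecution_compute(execution);
//   uint64_t nanos = 0;
//   ANeuralNetworksExecution_getDuration(
//       execution, ANEURALNETWORKS_DURATION_ON_HARDWARE, &nanos);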

typedef int (*ANeuralNetworksDevice_getExtensionSupport_fn)(
    const ANeuralNetworksDevice* device, const char* extensionName,
    bool* isExtensionSupported);

typedef int (*ANeuralNetworksModel_getExtensionOperandType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operandCodeWithinExtension, int32_t* type);

typedef int (*ANeuralNetworksModel_getExtensionOperationType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operationCodeWithinExtension, ANeuralNetworksOperationType* type);

typedef int (*ANeuralNetworksModel_setOperandExtensionData_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* data,
    size_t length);

typedef int (*ANeuralNetworksMemoryDesc_create_fn)(
    ANeuralNetworksMemoryDesc** desc);

typedef void (*ANeuralNetworksMemoryDesc_free_fn)(
    ANeuralNetworksMemoryDesc* desc);

typedef int (*ANeuralNetworksMemoryDesc_addInputRole_fn)(
    ANeuralNetworksMemoryDesc* desc,
    const ANeuralNetworksCompilation* compilation, uint32_t index,
    float frequency);

typedef int (*ANeuralNetworksMemoryDesc_addOutputRole_fn)(
    ANeuralNetworksMemoryDesc* desc,
    const ANeuralNetworksCompilation* compilation, uint32_t index,
    float frequency);

typedef int (*ANeuralNetworksMemoryDesc_setDimensions_fn)(
    ANeuralNetworksMemoryDesc* desc, uint32_t rank, const uint32_t* dimensions);

typedef int (*ANeuralNetworksMemoryDesc_finish_fn)(
    ANeuralNetworksMemoryDesc* desc);

typedef int (*ANeuralNetworksMemory_createFromDesc_fn)(
    const ANeuralNetworksMemoryDesc* desc, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksMemory_copy_fn)(const ANeuralNetworksMemory* src,
                                             const ANeuralNetworksMemory* dst);

typedef int (*ANeuralNetworksEvent_createFromSyncFenceFd_fn)(
    int sync_fence_fd, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_getSyncFenceFd_fn)(
    const ANeuralNetworksEvent* event, int* sync_fence_fd);

typedef int (*ANeuralNetworksExecution_startComputeWithDependencies_fn)(
    ANeuralNetworksExecution* execution,
    const ANeuralNetworksEvent* const* dependencies, uint32_t num_dependencies,
    uint64_t duration, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksExecution_enableInputAndOutputPadding_fn)(
    ANeuralNetworksExecution* execution, bool enable);

typedef int (*ANeuralNetworksExecution_setReusable_fn)(
    ANeuralNetworksExecution* execution, bool reusable);

typedef int64_t (*ANeuralNetworks_getRuntimeFeatureLevel_fn)();

typedef int32_t (*SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef int64_t (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef const uint8_t* (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef const char* (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef int32_t (*SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef ANeuralNetworksDiagnosticDataClass (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef ANeuralNetworksDiagnosticDataClass (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef uint64_t (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef bool (*SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef bool (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef bool (
    *SL_ANeuralNetworksDiagnosticCompilationInfo_areDynamicTensorsUsed_fn)(
    const ANeuralNetworksDiagnosticCompilationInfo* diagnosticCompilationInfo);

typedef int32_t (*SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef int64_t (*SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef const uint8_t* (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef const char* (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef ANeuralNetworksDiagnosticExecutionMode (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef ANeuralNetworksDiagnosticDataClass (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef ANeuralNetworksDiagnosticDataClass (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef uint32_t (*SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef uint64_t (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef uint64_t (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef uint64_t (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef bool (*SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef bool (*SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef bool (
    *SL_ANeuralNetworksDiagnosticExecutionInfo_areDynamicTensorsUsed_fn)(
    const ANeuralNetworksDiagnosticExecutionInfo* diagnosticExecutionInfo);

typedef void (*SL_ANeuralNetworksDiagnostic_registerCallbacks_fn)(
    ANeuralNetworksDiagnosticCompilationFinishedCallback compilationCallback,
    ANeuralNetworksDiagnosticExecutionFinishedCallback executionCallback,
    void* callbackContext);
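
// A minimal sketch of registering diagnostics callbacks through the function
// type above, assuming `register_fn` was resolved from an NNAPI support
// library; the callback names are hypothetical.
//
//   void OnCompilationFinished(
//       const void* context,
//       const ANeuralNetworksDiagnosticCompilationInfo* info) {
//     // Inspect `info` with the SL_ANeuralNetworksDiagnosticCompilationInfo_*
//     // getters declared above.
//   }
//   void OnExecutionFinished(
//       const void* context,
//       const ANeuralNetworksDiagnosticExecutionInfo* info) {}
//
//   SL_ANeuralNetworksDiagnostic_registerCallbacks_fn register_fn = ...;
//   register_fn(OnCompilationFinished, OnExecutionFinished,
//               /*callbackContext=*/nullptr);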

#endif  // TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_