/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_
#define TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_

#include <stdint.h>
#include <stdio.h>

#include <string>

typedef struct AHardwareBuffer AHardwareBuffer;

// NN api types based on NNAPI header file
// https://developer.android.com/ndk/reference/group/neural-networks

/**
 * Operand types.
 *
 * The type of operands that can be added to a model.
 *
 * Although we define many types, most operators accept just a few
 * types.  Most used are ANEURALNETWORKS_TENSOR_FLOAT32,
 * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, and ANEURALNETWORKS_INT32.
 */
enum {
  ANEURALNETWORKS_FLOAT32 = 0,
  ANEURALNETWORKS_INT32 = 1,
  ANEURALNETWORKS_UINT32 = 2,
  ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
  ANEURALNETWORKS_TENSOR_INT32 = 4,
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
  ANEURALNETWORKS_BOOL = 6,
  ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
  ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
  ANEURALNETWORKS_TENSOR_BOOL8 = 9,
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
};

/**
 * Operation types.
 *
 * The type of operations that can be added to a model.
 */
enum {
  ANEURALNETWORKS_ADD = 0,
  ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
  ANEURALNETWORKS_CONCATENATION = 2,
  ANEURALNETWORKS_CONV_2D = 3,
  ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
  ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
  ANEURALNETWORKS_DEQUANTIZE = 6,
  ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
  ANEURALNETWORKS_FLOOR = 8,
  ANEURALNETWORKS_FULLY_CONNECTED = 9,
  ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
  ANEURALNETWORKS_L2_NORMALIZATION = 11,
  ANEURALNETWORKS_L2_POOL_2D = 12,
  ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
  ANEURALNETWORKS_LOGISTIC = 14,
  ANEURALNETWORKS_LSH_PROJECTION = 15,
  ANEURALNETWORKS_LSTM = 16,
  ANEURALNETWORKS_MAX_POOL_2D = 17,
  ANEURALNETWORKS_MUL = 18,
  ANEURALNETWORKS_RELU = 19,
  ANEURALNETWORKS_RELU1 = 20,
  ANEURALNETWORKS_RELU6 = 21,
  ANEURALNETWORKS_RESHAPE = 22,
  ANEURALNETWORKS_RESIZE_BILINEAR = 23,
  ANEURALNETWORKS_RNN = 24,
  ANEURALNETWORKS_SOFTMAX = 25,
  ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
  ANEURALNETWORKS_SVDF = 27,
  ANEURALNETWORKS_TANH = 28,
  ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
  ANEURALNETWORKS_DIV = 30,
  ANEURALNETWORKS_MEAN = 31,
  ANEURALNETWORKS_PAD = 32,
  ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
  ANEURALNETWORKS_SQUEEZE = 34,
  ANEURALNETWORKS_STRIDED_SLICE = 35,
  ANEURALNETWORKS_SUB = 36,
  ANEURALNETWORKS_TRANSPOSE = 37,
  ANEURALNETWORKS_ABS = 38,
  ANEURALNETWORKS_ARGMAX = 39,
  ANEURALNETWORKS_ARGMIN = 40,
  ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
  ANEURALNETWORKS_CAST = 45,
  ANEURALNETWORKS_EQUAL = 48,
  ANEURALNETWORKS_EXP = 49,
  ANEURALNETWORKS_EXPAND_DIMS = 50,
  ANEURALNETWORKS_GATHER = 51,
  ANEURALNETWORKS_GREATER = 53,
  ANEURALNETWORKS_GREATER_EQUAL = 54,
  ANEURALNETWORKS_LESS = 58,
  ANEURALNETWORKS_LESS_EQUAL = 59,
  ANEURALNETWORKS_LOG = 60,
  ANEURALNETWORKS_LOGICAL_AND = 61,
  ANEURALNETWORKS_LOGICAL_NOT = 62,
  ANEURALNETWORKS_LOGICAL_OR = 63,
  ANEURALNETWORKS_LOG_SOFTMAX = 64,
  ANEURALNETWORKS_MAXIMUM = 65,
  ANEURALNETWORKS_MINIMUM = 66,
  ANEURALNETWORKS_NEG = 67,
  ANEURALNETWORKS_NOT_EQUAL = 68,
  ANEURALNETWORKS_PAD_V2 = 69,
  ANEURALNETWORKS_POW = 70,
  ANEURALNETWORKS_PRELU = 71,
  ANEURALNETWORKS_QUANTIZE = 72,
  ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
  ANEURALNETWORKS_REDUCE_ANY = 76,
  ANEURALNETWORKS_REDUCE_MAX = 77,
  ANEURALNETWORKS_REDUCE_MIN = 78,
  ANEURALNETWORKS_REDUCE_PROD = 79,
  ANEURALNETWORKS_REDUCE_SUM = 80,
  ANEURALNETWORKS_RSQRT = 83,
  ANEURALNETWORKS_SELECT = 84,
  ANEURALNETWORKS_SIN = 85,
  ANEURALNETWORKS_SLICE = 86,
  ANEURALNETWORKS_SPLIT = 87,
  ANEURALNETWORKS_SQRT = 88,
  ANEURALNETWORKS_TILE = 89,
  ANEURALNETWORKS_TOPK_V2 = 90,
  ANEURALNETWORKS_TRANSPOSE_CONV = 91,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
  ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
  ANEURALNETWORKS_QUANTIZED_LSTM = 95,
  ANEURALNETWORKS_IF = 96,
  ANEURALNETWORKS_WHILE = 97,
  ANEURALNETWORKS_ELU = 98,
  ANEURALNETWORKS_HARD_SWISH = 99,
  ANEURALNETWORKS_FILL = 100,
  ANEURALNETWORKS_RANK = 101,
};

/**
 * Fused activation function types.
 *
 */
enum {
  ANEURALNETWORKS_FUSED_NONE = 0,
  ANEURALNETWORKS_FUSED_RELU = 1,
  ANEURALNETWORKS_FUSED_RELU1 = 2,
  ANEURALNETWORKS_FUSED_RELU6 = 3,
};

/**
 * Execution preferences.
 */
enum {
  ANEURALNETWORKS_PREFER_LOW_POWER = 0,
  ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
  ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
};

/**
 * Result codes.
 */
// LINT.IfChange
enum {
  ANEURALNETWORKS_NO_ERROR = 0,
  ANEURALNETWORKS_OUT_OF_MEMORY = 1,
  ANEURALNETWORKS_INCOMPLETE = 2,
  ANEURALNETWORKS_UNEXPECTED_NULL = 3,
  ANEURALNETWORKS_BAD_DATA = 4,
  ANEURALNETWORKS_OP_FAILED = 5,
  ANEURALNETWORKS_BAD_STATE = 6,
  ANEURALNETWORKS_UNMAPPABLE = 7,
  ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
  ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
  ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
  ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
  ANEURALNETWORKS_DEAD_OBJECT = 14,
};
// LINT.ThenChange(//tensorflow/lite/delegates/nnapi/nnapi_delegate.cc:NnApiErrorDescription)
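
// Example (illustrative sketch, not part of the upstream NNAPI header): every
// NNAPI call reports its status through these result codes, so callers
// typically wrap each call in a check. The macro name CHECK_NNAPI below is
// hypothetical.
//
//   #define CHECK_NNAPI(call)                            \
//     do {                                               \
//       int status = (call);                             \
//       if (status != ANEURALNETWORKS_NO_ERROR) {        \
//         fprintf(stderr, "NNAPI error %d at %s:%d\n",   \
//                 status, __FILE__, __LINE__);           \
//       }                                                \
//     } while (0)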

/**
 * Implicit padding algorithms.
 */
enum {
  ANEURALNETWORKS_PADDING_SAME = 1,
  ANEURALNETWORKS_PADDING_VALID = 2,
};

/**
 * Device types.
 *
 * The type of NNAPI device.
 */
enum {
  /** The device type cannot be provided. */
  ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
  /** The device does not fall into any category below. */
  ANEURALNETWORKS_DEVICE_OTHER = 1,
  /** The device runs NNAPI models on single or multi-core CPU. */
  ANEURALNETWORKS_DEVICE_CPU = 2,
  /** The device can run NNAPI models and also accelerate graphics APIs such
   * as OpenGL ES and Vulkan. */
  ANEURALNETWORKS_DEVICE_GPU = 3,
  /** Dedicated accelerator for Machine Learning workloads. */
  ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
};

/**
 * Relative execution priority.
 *
 * Available since API level 30.
 */
enum {
  ANEURALNETWORKS_PRIORITY_LOW = 90,
  ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
  ANEURALNETWORKS_PRIORITY_HIGH = 110,
  ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
};

/**
 * NNAPI feature levels.
 *
 * Each update of the NNAPI specification yields a new NNAPI feature level enum
 * value. An NNAPI feature level corresponds to an NNAPI specification version
 * that a driver and/or the NNAPI runtime can implement.
 */
enum {
  /** NNAPI specification available in Android O-MR1, Android NNAPI feature
     level 1 */
  ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
  /** NNAPI specification available in Android P, Android NNAPI feature level 2
   */
  ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
  /** NNAPI specification available in Android Q, Android NNAPI feature level 3
   */
  ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
  /** NNAPI specification available in Android R, Android NNAPI feature level 4
   */
  ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
  /**
   * NNAPI specification available in Android S, Android NNAPI feature level 5.
   * After Android S, the NNAPI specification can be updated between Android
   * API releases.
   */
  ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
};

/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory
 * descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is used
 * by {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling
 *     {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc}.
 *   - Destroy the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling
 * {@link ANeuralNetworksMemoryDesc_finish}. A memory descriptor is destroyed
 * by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once
 * {@link ANeuralNetworksMemoryDesc_finish}
 * has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no other
 * uses of the memory descriptor after calling
 * {@link ANeuralNetworksMemoryDesc_free}. It is however safe to continue using
 * a {@link ANeuralNetworksMemory} object created from the memory descriptor.
 *
 * Available since API level 30.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
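
// Example (illustrative sketch, not part of the upstream NNAPI header): the
// workflow described above, creating device memory suitable for output 0 of
// a finished `compilation` (a hypothetical object created elsewhere). Error
// checking is omitted for brevity.
//
//   ANeuralNetworksMemoryDesc* desc = NULL;
//   ANeuralNetworksMemoryDesc_create(&desc);
//   ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, /*index=*/0,
//                                           /*frequency=*/1.0f);
//   const uint32_t dims[] = {1, 1001};
//   ANeuralNetworksMemoryDesc_setDimensions(desc, 2, dims);
//   ANeuralNetworksMemoryDesc_finish(desc);
//
//   ANeuralNetworksMemory* memory = NULL;
//   ANeuralNetworksMemory_createFromDesc(desc, &memory);
//   ANeuralNetworksMemoryDesc_free(desc);  // The memory object remains valid.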

/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be
 * used to create shared memory from a file handle. {@link
 * ANeuralNetworksMemory_createShared} can be used to directly create shared
 * memory.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
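
// Example (illustrative sketch, not part of the upstream NNAPI header):
// creating a shared-memory region with the NDK's ASharedMemory_create and
// wrapping it in an ANeuralNetworksMemory object. The 1 MiB size and the
// region name are arbitrary; PROT_READ / PROT_WRITE come from <sys/mman.h>.
//
//   const size_t kSize = 1 << 20;
//   int fd = ASharedMemory_create("model_weights", kSize);
//   ANeuralNetworksMemory* memory = NULL;
//   ANeuralNetworksMemory_createFromFd(kSize, PROT_READ | PROT_WRITE, fd,
//                                      /*offset=*/0, &memory);
//   // ... use with ANeuralNetworksModel_setOperandValueFromMemory ...
//   ANeuralNetworksMemory_free(memory);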

/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>The model will be built by calling<ul>
 * <li>{@link ANeuralNetworksModel_create},</li>
 * <li>{@link ANeuralNetworksModel_addOperation},</li>
 * <li>{@link ANeuralNetworksModel_addOperand},</li>
 * </ul>
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has
 * returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}. This
 * includes any compilation or execution object created using the model.</p>
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
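
// Example (illustrative sketch, not part of the upstream NNAPI header):
// building a minimal model that adds two float vectors. Operand indices are
// assigned in the order operands are added; ANEURALNETWORKS_ADD takes two
// tensors plus a fused-activation scalar and produces one output tensor.
//
//   ANeuralNetworksModel* model = NULL;
//   ANeuralNetworksModel_create(&model);
//
//   const uint32_t dims[] = {2};
//   ANeuralNetworksOperandType tensor_type = {
//       ANEURALNETWORKS_TENSOR_FLOAT32, 1, dims, 0.0f, 0};
//   ANeuralNetworksOperandType scalar_type = {
//       ANEURALNETWORKS_INT32, 0, NULL, 0.0f, 0};
//
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // 0: input a
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // 1: input b
//   ANeuralNetworksModel_addOperand(model, &scalar_type);  // 2: activation
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // 3: output
//
//   const int32_t act = ANEURALNETWORKS_FUSED_NONE;
//   ANeuralNetworksModel_setOperandValue(model, 2, &act, sizeof(act));
//
//   const uint32_t add_inputs[] = {0, 1, 2};
//   const uint32_t add_outputs[] = {3};
//   ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3,
//                                     add_inputs, 1, add_outputs);
//
//   const uint32_t model_inputs[] = {0, 1};
//   const uint32_t model_outputs[] = {3};
//   ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, model_inputs, 1,
//                                                 model_outputs);
//   ANeuralNetworksModel_finish(model);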

/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 *    <li>Create a new compilation instance by calling the
 *        {@link ANeuralNetworksCompilation_create} function.</li>
 *    <li>Perform the compilation with {@link
 * ANeuralNetworksCompilation_start}.</li> <li>Wait for the compilation to
 * complete with {@link ANeuralNetworksCompilation_wait}.</li> <li>Use the
 * compilation as many times as needed with {@link
 * ANeuralNetworksExecution_create}.</li> <li>Destroy the compilation with
 * {@link ANeuralNetworksCompilation_free} once all executions using the
 * compilation have completed.</li></ul></p>
 *
 * <p>A compilation cannot be modified once {@link
 * ANeuralNetworksCompilation_start} has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a compilation at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksCompilation_wait} at the same time.
 * It is also safe for multiple threads to use a compilation object once
 * {@link ANeuralNetworksCompilation_wait} has completed.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the compilation after calling {@link
 * ANeuralNetworksCompilation_free}. This includes any execution object created
 * using the compilation.</p>
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
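
// Example (illustrative sketch, not part of the upstream NNAPI header):
// compiling the model built above. Note that in released NNAPI versions a
// compilation is completed synchronously with
// ANeuralNetworksCompilation_finish (see the *_finish_fn typedef below)
// rather than a start/wait pair.
//
//   ANeuralNetworksCompilation* compilation = NULL;
//   ANeuralNetworksCompilation_create(model, &compilation);
//   ANeuralNetworksCompilation_setPreference(
//       compilation, ANEURALNETWORKS_PREFER_SUSTAINED_SPEED);
//   ANeuralNetworksCompilation_finish(compilation);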

/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a
 * machine learning model to a set of inputs.
 *
 * <p>To use:<ul>
 *    <li>Create a new execution instance by calling the
 *        {@link ANeuralNetworksExecution_create} function.</li>
 *    <li>Associate data to the model inputs with
 *        {@link ANeuralNetworksExecution_setInput} or
 *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 *    <li>Associate output buffers to the model outputs with
 *        {@link ANeuralNetworksExecution_setOutput} or
 *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 *    <li>Apply the model with {@link
 * ANeuralNetworksExecution_startCompute}.</li> <li>Wait for the execution to
 * complete with {@link ANeuralNetworksExecution_wait}.</li> <li>Destroy the
 * execution with
 *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An execution cannot be modified once {@link
 * ANeuralNetworksExecution_start} has been called on it.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_startCompute} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksExecution_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the request after calling {@link
 * ANeuralNetworksRequest_free}.</p>
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
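
// Example (illustrative sketch, not part of the upstream NNAPI header):
// running one asynchronous inference with the compilation from the previous
// sketch. `input_a`, `input_b`, and `output` are hypothetical caller-owned
// float[2] buffers; passing NULL for the operand type reuses the type given
// when the model was built.
//
//   ANeuralNetworksExecution* execution = NULL;
//   ANeuralNetworksExecution_create(compilation, &execution);
//   ANeuralNetworksExecution_setInput(execution, 0, NULL, input_a,
//                                     2 * sizeof(float));
//   ANeuralNetworksExecution_setInput(execution, 1, NULL, input_b,
//                                     2 * sizeof(float));
//   ANeuralNetworksExecution_setOutput(execution, 0, NULL, output,
//                                      2 * sizeof(float));
//   ANeuralNetworksEvent* event = NULL;
//   ANeuralNetworksExecution_startCompute(execution, &event);
//   ANeuralNetworksEvent_wait(event);
//   ANeuralNetworksEvent_free(event);
//   ANeuralNetworksExecution_free(execution);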

/**
 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
 */
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
  /** The index of the channel dimension. */
  uint32_t channelDim;
  /** The size of the scale array. Should be equal to dimension[channelDim] of
   * the Operand. */
  uint32_t scaleCount;
  /** The array of scaling values for each channel. Each value must be greater
   * than zero. */
  const float* scales;
} ANeuralNetworksSymmPerChannelQuantParams;
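
// Example (illustrative sketch, not part of the upstream NNAPI header):
// attaching per-channel quantization parameters to a
// ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL filter operand, assuming it
// is operand index 1 with 32 output channels along dimension 0.
//
//   float channel_scales[32];  // filled with one positive scale per channel
//   ANeuralNetworksSymmPerChannelQuantParams params = {
//       /*channelDim=*/0, /*scaleCount=*/32, channel_scales};
//   ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(model, 1,
//                                                            &params);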

/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
 * of a rapid sequence of executions. It will likely cause overhead if only used
 * for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of inferences
 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
 * object and the {@link ANeuralNetworksExecution} objects used with it must all
 * have been created from the same {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may choose
 * to increase the clock frequency of its accelerator for the lifetime of a
 * burst object.
 *
 * <p>To use:<ul>
 *    <li>Create a new burst object by calling the
 *        {@link ANeuralNetworksBurst_create} function.</li>
 *    <li>For each execution:</li><ul>
 *        <li>Create {@link ANeuralNetworksExecution} and configure its
 *            properties (see {@link ANeuralNetworksExecution} for
 * details).</li> <li>Apply the model synchronously with
 *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *            {@link ANeuralNetworksBurst} with the new
 *            {@link ANeuralNetworksExecution}.</li>
 *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 *    <li>Destroy the burst with
 *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
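
// Example (illustrative sketch, not part of the upstream NNAPI header):
// reusing one burst object across a rapid sequence of synchronous
// executions created from the same compilation. `num_frames` is a
// hypothetical frame count.
//
//   ANeuralNetworksBurst* burst = NULL;
//   ANeuralNetworksBurst_create(compilation, &burst);
//   for (int frame = 0; frame < num_frames; ++frame) {
//     ANeuralNetworksExecution* execution = NULL;
//     ANeuralNetworksExecution_create(compilation, &execution);
//     // ... set this frame's inputs and outputs ...
//     ANeuralNetworksExecution_burstCompute(execution, burst);
//     ANeuralNetworksExecution_free(execution);
//   }
//   ANeuralNetworksBurst_free(burst);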

/**
 * ANeuralNetworksOperandType describes the type of an operand.
 * This structure is used to describe both scalars and tensors.
 */
typedef struct ANeuralNetworksOperandType {
  /** The data type, e.g. ANEURALNETWORKS_INT8. */
  int32_t type;
  /** The number of dimensions. It should be 0 for scalars. */
  uint32_t dimensionCount;
  /** The dimensions of the tensor. It should be nullptr for scalars. */
  const uint32_t* dimensions;
  /** These two fields are only used for quantized tensors.
   * They should be zero for scalars and non-fixed point tensors.
   * The dequantized value of each entry is (value - offset) * scale.
   */
  float scale;
  int32_t zeroPoint;
} ANeuralNetworksOperandType;
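
// Example (illustrative sketch, not part of the upstream NNAPI header):
// describing an asymmetrically quantized uint8 tensor. With scale 0.5 and
// zeroPoint 128, a stored value of 130 dequantizes to (130 - 128) * 0.5 = 1.0.
//
//   const uint32_t dims[] = {1, 224, 224, 3};
//   ANeuralNetworksOperandType quant_type = {
//       ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,  // type
//       4,                                    // dimensionCount
//       dims,                                 // dimensions
//       0.5f,                                 // scale
//       128};                                 // zeroPoint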

/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;

typedef int32_t ANeuralNetworksOperationType;

/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the
 * corresponding device, and control which device(s) a model is to be run on.
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
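
// Example (illustrative sketch, not part of the upstream NNAPI header):
// enumerating the available NNAPI devices and printing their name, type,
// and feature level (available since API level 29).
//
//   uint32_t device_count = 0;
//   ANeuralNetworks_getDeviceCount(&device_count);
//   for (uint32_t i = 0; i < device_count; ++i) {
//     ANeuralNetworksDevice* device = NULL;
//     ANeuralNetworks_getDevice(i, &device);
//     const char* name = NULL;
//     int32_t type = 0;
//     int64_t feature_level = 0;
//     ANeuralNetworksDevice_getName(device, &name);
//     ANeuralNetworksDevice_getType(device, &type);
//     ANeuralNetworksDevice_getFeatureLevel(device, &feature_level);
//     printf("device %u: %s type=%d feature_level=%lld\n", i, name, type,
//            (long long)feature_level);
//   }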

// nn api function types

typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
    size_t size, int protect, int fd, size_t offset,
    ANeuralNetworksMemory** memory);
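
// Example (illustrative sketch, not part of the upstream header): these
// function-pointer typedefs exist so that NNAPI symbols can be resolved at
// runtime from libneuralnetworks.so instead of being linked directly, roughly
// as the TFLite NNAPI implementation does. The local variable names below are
// hypothetical; <dlfcn.h> provides dlopen/dlsym.
//
//   void* handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
//   ANeuralNetworksMemory_createFromFd_fn createFromFd =
//       handle ? reinterpret_cast<ANeuralNetworksMemory_createFromFd_fn>(
//                    dlsym(handle, "ANeuralNetworksMemory_createFromFd"))
//              : nullptr;
//   if (createFromFd != nullptr) {
//     // The symbol is present on this device and safe to call.
//   }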

typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);

typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);

typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);

typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);

typedef int (*ANeuralNetworksCompilation_create_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);

typedef void (*ANeuralNetworksCompilation_free_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
    ANeuralNetworksCompilation* compilation, int32_t preference);

typedef int (*ANeuralNetworksCompilation_finish_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksModel_addOperand_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);

typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* buffer,
    size_t length);

typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksSymmPerChannelQuantParams* channelQuant);

typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksMemory* memory, size_t offset, size_t length);

typedef int (*ANeuralNetworksModel_addOperation_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
    uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
    const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
    ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
    uint32_t outputCount, const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
    ANeuralNetworksModel* model, bool allow);

typedef int (*ANeuralNetworksExecution_create_fn)(
    ANeuralNetworksCompilation* compilation,
    ANeuralNetworksExecution** execution);

typedef void (*ANeuralNetworksExecution_free_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setInput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_setOutput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_startCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);

typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);

typedef int (*ASharedMemory_create_fn)(const char* name, size_t size);

typedef int (*ANeuralNetworks_getDeviceCount_fn)(uint32_t* numDevices);

typedef int (*ANeuralNetworks_getDevice_fn)(uint32_t devIndex,
                                            ANeuralNetworksDevice** device);

typedef int (*ANeuralNetworksDevice_getName_fn)(
    const ANeuralNetworksDevice* device, const char** name);

typedef int (*ANeuralNetworksDevice_getType_fn)(
    const ANeuralNetworksDevice* device, int32_t* type);

typedef int (*ANeuralNetworksDevice_getVersion_fn)(
    const ANeuralNetworksDevice* device, const char** version);

typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(
    const ANeuralNetworksDevice* device, int64_t* featureLevel);

typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)(
    const ANeuralNetworksModel* model,
    const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
    bool* supportedOps);

typedef int (*ANeuralNetworksCompilation_createForDevices_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
    uint32_t numDevices, ANeuralNetworksCompilation** compilation);

typedef int (*ANeuralNetworksCompilation_setCaching_fn)(
    ANeuralNetworksCompilation* compilation, const char* cacheDir,
    const uint8_t* token);

typedef int (*ANeuralNetworksCompilation_setTimeout_fn)(
    ANeuralNetworksCompilation* compilation, uint64_t duration);

typedef int (*ANeuralNetworksCompilation_setPriority_fn)(
    ANeuralNetworksCompilation* compilation, int priority);

typedef int (*ANeuralNetworksExecution_compute_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_setLoopTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* rank);

typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* dimensions);

typedef int (*ANeuralNetworksBurst_create_fn)(
    ANeuralNetworksCompilation* compilation, ANeuralNetworksBurst** burst);

typedef void (*ANeuralNetworksBurst_free_fn)(ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksExecution_burstCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksMemory_createFromAHardwareBuffer_fn)(
    const AHardwareBuffer* ahwb, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(
    ANeuralNetworksExecution* execution, bool measure);

typedef enum {
  // Execution time on hardware (not driver, which runs on host processor).
  ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
  // Execution time in driver (including time on hardware). Excludes overhead
  // such as that of the runtime itself and the IPC needed for the runtime to
  // communicate with the driver.
  ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
  // Execution time on hardware, after all dependencies have been signaled.
  // If no dependencies are specified (for example, if the execution was
  // scheduled other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
  // Execution time in driver, after all dependencies have been signaled.
  // Excludes overhead such as that of the runtime itself and the IPC needed
  // for the runtime to communicate with the driver.
  // If no dependencies are specified (for example, if the execution was
  // scheduled other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;
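
// Example (illustrative sketch, not part of the upstream NNAPI header):
// measuring how long an execution spent in the driver and on the hardware.
// Timing must be requested before the execution is started; durations are
// reported in nanoseconds, with UINT64_MAX meaning "not available".
//
//   ANeuralNetworksExecution_setMeasureTiming(execution, true);
//   // ... run the execution and wait for it to complete ...
//   uint64_t on_hardware = 0, in_driver = 0;
//   ANeuralNetworksExecution_getDuration(
//       execution, ANEURALNETWORKS_DURATION_ON_HARDWARE, &on_hardware);
//   ANeuralNetworksExecution_getDuration(
//       execution, ANEURALNETWORKS_DURATION_IN_DRIVER, &in_driver);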

typedef int (*ANeuralNetworksExecution_getDuration_fn)(
    const ANeuralNetworksExecution* execution, int32_t durationCode,
    uint64_t* duration);

typedef int (*ANeuralNetworksDevice_getExtensionSupport_fn)(
    const ANeuralNetworksDevice* device, const char* extensionName,
    bool* isExtensionSupported);

typedef int (*ANeuralNetworksModel_getExtensionOperandType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operandCodeWithinExtension, int32_t* type);

typedef int (*ANeuralNetworksModel_getExtensionOperationType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operationCodeWithinExtension, ANeuralNetworksOperationType* type);

typedef int (*ANeuralNetworksModel_setOperandExtensionData_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* data,
    size_t length);

typedef int (*ANeuralNetworksMemoryDesc_create_fn)(
    ANeuralNetworksMemoryDesc** desc);

typedef void (*ANeuralNetworksMemoryDesc_free_fn)(
    ANeuralNetworksMemoryDesc* desc);

typedef int (*ANeuralNetworksMemoryDesc_addInputRole_fn)(
    ANeuralNetworksMemoryDesc* desc,
    const ANeuralNetworksCompilation* compilation, uint32_t index,
    float frequency);

typedef int (*ANeuralNetworksMemoryDesc_addOutputRole_fn)(
    ANeuralNetworksMemoryDesc* desc,
    const ANeuralNetworksCompilation* compilation, uint32_t index,
    float frequency);

typedef int (*ANeuralNetworksMemoryDesc_setDimensions_fn)(
    ANeuralNetworksMemoryDesc* desc, uint32_t rank, const uint32_t* dimensions);

typedef int (*ANeuralNetworksMemoryDesc_finish_fn)(
    ANeuralNetworksMemoryDesc* desc);

typedef int (*ANeuralNetworksMemory_createFromDesc_fn)(
    const ANeuralNetworksMemoryDesc* desc, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksMemory_copy_fn)(const ANeuralNetworksMemory* src,
                                             const ANeuralNetworksMemory* dst);

typedef int (*ANeuralNetworksEvent_createFromSyncFenceFd_fn)(
    int sync_fence_fd, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_getSyncFenceFd_fn)(
    const ANeuralNetworksEvent* event, int* sync_fence_fd);

typedef int (*ANeuralNetworksExecution_startComputeWithDependencies_fn)(
    ANeuralNetworksExecution* execution,
    const ANeuralNetworksEvent* const* dependencies, uint32_t num_dependencies,
    uint64_t duration, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksExecution_enableInputAndOutputPadding_fn)(
    ANeuralNetworksExecution* execution, bool enable);

typedef int (*ANeuralNetworksExecution_setReusable_fn)(
    ANeuralNetworksExecution* execution, bool reusable);

typedef int64_t (*ANeuralNetworks_getRuntimeFeatureLevel_fn)();
#endif  // TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_