• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #ifndef TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_
16 #define TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_
17 
18 #include <stdint.h>
19 #include <stdio.h>
20 
21 #include <string>
22 
// Forward declaration of the Android NDK AHardwareBuffer type so this header
// does not need to include android/hardware_buffer.h.
typedef struct AHardwareBuffer AHardwareBuffer;
24 
25 // NN api types based on NNAPI header file
26 // https://developer.android.com/ndk/reference/group/neural-networks
27 
28 /**
29  * Operand types.
30  *
31  * The type of operands that can be added to a model.
32  *
33  * Although we define many types, most operators accept just a few
34  * types.  Most used are ANEURALNETWORKS_TENSOR_FLOAT32,
35  * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, and ANEURALNETWORKS_INT32.
36  */
enum {
  // Values mirror the NNAPI OperandCode constants; gaps in the numeric
  // sequence correspond to operand types this header does not declare.
  // Entries are kept in ascending value order (fix: TENSOR_BOOL8 was
  // previously listed out of order).
  /** A 32 bit floating point scalar value. */
  ANEURALNETWORKS_FLOAT32 = 0,
  /** A signed 32 bit integer scalar value. */
  ANEURALNETWORKS_INT32 = 1,
  /** An unsigned 32 bit integer scalar value. */
  ANEURALNETWORKS_UINT32 = 2,
  /** A tensor of 32 bit floating point values. */
  ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
  /** A tensor of 32 bit integer values. */
  ANEURALNETWORKS_TENSOR_INT32 = 4,
  /** A tensor of 8 bit unsigned integers, asymmetrically quantized. */
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
  /** An 8 bit boolean scalar value. */
  ANEURALNETWORKS_BOOL = 6,
  /** A tensor of 16 bit signed integers, symmetrically quantized. */
  ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
  /** A tensor of 8 bit boolean values. */
  ANEURALNETWORKS_TENSOR_BOOL8 = 9,
  /** A tensor of 8 bit signed integers with symmetric per-channel
   * quantization; see ANeuralNetworksSymmPerChannelQuantParams. */
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
  /** A tensor of 8 bit signed integers, symmetrically quantized. */
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
  /** A tensor of 8 bit signed integers, asymmetrically quantized. */
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
};
51 
/**
 * Operation types.
 *
 * The type of operations that can be added to a model.
 *
 * Values match the NNAPI OperationCode constants. Gaps in the numeric
 * sequence (e.g. 41, 43-44, 46-47, 52, 55-57, 74-75, 81-82) correspond to
 * NNAPI operations this header does not declare.
 */
enum {
  ANEURALNETWORKS_ADD = 0,
  ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
  ANEURALNETWORKS_CONCATENATION = 2,
  ANEURALNETWORKS_CONV_2D = 3,
  ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
  ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
  ANEURALNETWORKS_DEQUANTIZE = 6,
  ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
  ANEURALNETWORKS_FLOOR = 8,
  ANEURALNETWORKS_FULLY_CONNECTED = 9,
  ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
  ANEURALNETWORKS_L2_NORMALIZATION = 11,
  ANEURALNETWORKS_L2_POOL_2D = 12,
  ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
  ANEURALNETWORKS_LOGISTIC = 14,
  ANEURALNETWORKS_LSH_PROJECTION = 15,
  ANEURALNETWORKS_LSTM = 16,
  ANEURALNETWORKS_MAX_POOL_2D = 17,
  ANEURALNETWORKS_MUL = 18,
  ANEURALNETWORKS_RELU = 19,
  ANEURALNETWORKS_RELU1 = 20,
  ANEURALNETWORKS_RELU6 = 21,
  ANEURALNETWORKS_RESHAPE = 22,
  ANEURALNETWORKS_RESIZE_BILINEAR = 23,
  ANEURALNETWORKS_RNN = 24,
  ANEURALNETWORKS_SOFTMAX = 25,
  ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
  ANEURALNETWORKS_SVDF = 27,
  ANEURALNETWORKS_TANH = 28,
  ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
  ANEURALNETWORKS_DIV = 30,
  ANEURALNETWORKS_MEAN = 31,
  ANEURALNETWORKS_PAD = 32,
  ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
  ANEURALNETWORKS_SQUEEZE = 34,
  ANEURALNETWORKS_STRIDED_SLICE = 35,
  ANEURALNETWORKS_SUB = 36,
  ANEURALNETWORKS_TRANSPOSE = 37,
  ANEURALNETWORKS_ABS = 38,
  ANEURALNETWORKS_ARGMAX = 39,
  ANEURALNETWORKS_ARGMIN = 40,
  ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
  ANEURALNETWORKS_CAST = 45,
  ANEURALNETWORKS_EQUAL = 48,
  ANEURALNETWORKS_EXP = 49,
  ANEURALNETWORKS_EXPAND_DIMS = 50,
  ANEURALNETWORKS_GATHER = 51,
  ANEURALNETWORKS_GREATER = 53,
  ANEURALNETWORKS_GREATER_EQUAL = 54,
  ANEURALNETWORKS_LESS = 58,
  ANEURALNETWORKS_LESS_EQUAL = 59,
  ANEURALNETWORKS_LOG = 60,
  ANEURALNETWORKS_LOGICAL_AND = 61,
  ANEURALNETWORKS_LOGICAL_NOT = 62,
  ANEURALNETWORKS_LOGICAL_OR = 63,
  ANEURALNETWORKS_LOG_SOFTMAX = 64,
  ANEURALNETWORKS_MAXIMUM = 65,
  ANEURALNETWORKS_MINIMUM = 66,
  ANEURALNETWORKS_NEG = 67,
  ANEURALNETWORKS_NOT_EQUAL = 68,
  ANEURALNETWORKS_PAD_V2 = 69,
  ANEURALNETWORKS_POW = 70,
  ANEURALNETWORKS_PRELU = 71,
  ANEURALNETWORKS_QUANTIZE = 72,
  ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
  ANEURALNETWORKS_REDUCE_ANY = 76,
  ANEURALNETWORKS_REDUCE_MAX = 77,
  ANEURALNETWORKS_REDUCE_MIN = 78,
  ANEURALNETWORKS_REDUCE_PROD = 79,
  ANEURALNETWORKS_REDUCE_SUM = 80,
  ANEURALNETWORKS_RSQRT = 83,
  ANEURALNETWORKS_SELECT = 84,
  ANEURALNETWORKS_SIN = 85,
  ANEURALNETWORKS_SLICE = 86,
  ANEURALNETWORKS_SPLIT = 87,
  ANEURALNETWORKS_SQRT = 88,
  ANEURALNETWORKS_TILE = 89,
  ANEURALNETWORKS_TOPK_V2 = 90,
  ANEURALNETWORKS_TRANSPOSE_CONV = 91,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
  ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
  ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
  // Control-flow and later operations; per NNAPI docs these were added in
  // API level 30 — confirm against the NNAPI reference.
  ANEURALNETWORKS_QUANTIZED_LSTM = 95,
  ANEURALNETWORKS_IF = 96,
  ANEURALNETWORKS_WHILE = 97,
  ANEURALNETWORKS_ELU = 98,
  ANEURALNETWORKS_HARD_SWISH = 99,
  ANEURALNETWORKS_FILL = 100,
  ANEURALNETWORKS_RANK = 101,
};
148 
/**
 * Fused activation function types.
 *
 * Values match the NNAPI FuseCode constants; many operations take one of
 * these as a trailing scalar input describing an activation fused into the
 * operation's output.
 */
enum {
  /** No fused activation. */
  ANEURALNETWORKS_FUSED_NONE = 0,
  /** Fused ReLU: output clamped to [0, +inf). */
  ANEURALNETWORKS_FUSED_RELU = 1,
  /** Fused ReLU1: output clamped to [-1, 1]. */
  ANEURALNETWORKS_FUSED_RELU1 = 2,
  /** Fused ReLU6: output clamped to [0, 6]. */
  ANEURALNETWORKS_FUSED_RELU6 = 3,
};
159 
/**
 * Execution preferences.
 *
 * Values match the NNAPI PreferenceCode constants, passed via
 * {@link ANeuralNetworksCompilation_setPreference}.
 */
enum {
  /** Prefer minimizing battery drain. */
  ANEURALNETWORKS_PREFER_LOW_POWER = 0,
  /** Prefer returning a single answer as fast as possible. */
  ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
  /** Prefer maximizing throughput of successive evaluations. */
  ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
};
168 
/**
 * Result codes.
 *
 * Values match the NNAPI ResultCode constants. Per the LINT directives, any
 * change here must be mirrored in the NnApiErrorDescription table in
 * nnapi_delegate.cc.
 */
// LINT.IfChange
enum {
  ANEURALNETWORKS_NO_ERROR = 0,
  ANEURALNETWORKS_OUT_OF_MEMORY = 1,
  ANEURALNETWORKS_INCOMPLETE = 2,
  ANEURALNETWORKS_UNEXPECTED_NULL = 3,
  ANEURALNETWORKS_BAD_DATA = 4,
  ANEURALNETWORKS_OP_FAILED = 5,
  ANEURALNETWORKS_BAD_STATE = 6,
  ANEURALNETWORKS_UNMAPPABLE = 7,
  ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
  ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
  // Codes 10-14 report deadline/resource failures used by the timeout and
  // fenced-execution features (per NNAPI docs, API level 30 — confirm).
  ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
  ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
  ANEURALNETWORKS_DEAD_OBJECT = 14,
};
// LINT.ThenChange(//tensorflow/lite/delegates/nnapi/nnapi_delegate.cc:NnApiErrorDescription)
191 
/**
 * Implicit padding algorithms.
 *
 * Values match the NNAPI PaddingCode constants. Note they intentionally
 * start at 1, not 0.
 */
enum {
  /** SAME padding: enough implicit zero padding so that, with stride 1, the
   * output has the same spatial size as the input. */
  ANEURALNETWORKS_PADDING_SAME = 1,
  /** VALID padding: no implicit padding; only valid input windows are used. */
  ANEURALNETWORKS_PADDING_VALID = 2,
};
199 
/**
 * Device types.
 *
 * The type of NNAPI device. Values match the NNAPI DeviceTypeCode constants
 * returned by {@link ANeuralNetworksDevice_getType}.
 */
enum {
  /** The device type cannot be provided. */
  ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
  /** The device does not fall into any category below. */
  ANEURALNETWORKS_DEVICE_OTHER = 1,
  /** The device runs NNAPI models on single or multi-core CPU. */
  ANEURALNETWORKS_DEVICE_CPU = 2,
  /** The device can run NNAPI models and also accelerate graphics APIs such
   * as OpenGL ES and Vulkan. */
  ANEURALNETWORKS_DEVICE_GPU = 3,
  /** Dedicated accelerator for Machine Learning workloads. */
  ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
};
218 
/**
 * Relative execution priority.
 *
 * Values match the NNAPI priority constants passed to
 * {@link ANeuralNetworksCompilation_setPriority}; the default is
 * ANEURALNETWORKS_PRIORITY_MEDIUM.
 *
 * Available since API level 30.
 */
enum {
  ANEURALNETWORKS_PRIORITY_LOW = 90,
  ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
  ANEURALNETWORKS_PRIORITY_HIGH = 110,
  ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
};
/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory
 * descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is
 * used by {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling
 *     {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc}.
 *   - Destroy the memory descriptor with
 *     {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling
 * {@link ANeuralNetworksMemoryDesc_finish}. A memory descriptor is destroyed
 * by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once
 * {@link ANeuralNetworksMemoryDesc_finish} has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no
 * other uses of the memory descriptor after calling
 * {@link ANeuralNetworksMemoryDesc_free}. It is however safe to continue
 * using a {@link ANeuralNetworksMemory} object created from the memory
 * descriptor.
 *
 * Available since API level 30.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
274 
/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be
 * used to create shared memory from a file handle.
 * {@link ANeuralNetworksMemory_createShared} can be used to directly created
 * shared memory.
 * NOTE(review): ANeuralNetworksMemory_createShared has no corresponding
 * typedef in this header — confirm it exists in the NNAPI release targeted.
 *
 * Memory objects can also be used to specify the input and output arguments
 * of an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
295 
/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>The model will be built by calling<ul>
 * <li>{@link ANeuralNetworksModel_create},</li>
 * <li>{@link ANeuralNetworksModel_addOperation},</li>
 * <li>{@link ANeuralNetworksModel_addOperand},</li>
 * <li>{@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li>
 * </ul>
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has
 * returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}.
 * This includes any compilation or execution object created using the
 * model.</p>
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
319 
/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 *    <li>Create a new compilation instance by calling the
 *        {@link ANeuralNetworksCompilation_create} function.</li>
 *    <li>Perform the compilation with
 *        {@link ANeuralNetworksCompilation_start}.</li>
 *    <li>Wait for the compilation to complete with
 *        {@link ANeuralNetworksCompilation_wait}.</li>
 *    <li>Use the compilation as many times as needed with
 *        {@link ANeuralNetworksExecution_create}.</li>
 *    <li>Destroy the compilation with
 *        {@link ANeuralNetworksCompilation_free} once all executions using
 *        the compilation have completed.</li></ul></p>
 *
 * <p>A compilation cannot be modified once
 * {@link ANeuralNetworksCompilation_start} has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a compilation at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksCompilation_wait} at the same time.
 * It is also safe for multiple threads to use a compilation object once
 * {@link ANeuralNetworksCompilation_wait} has completed.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the compilation after calling {@link
 * ANeuralNetworksCompilation_free}. This includes any execution object created
 * using the compilation.</p>
 *
 * NOTE(review): ANeuralNetworksCompilation_start and _wait are not declared
 * anywhere in this header — only ANeuralNetworksCompilation_finish_fn is.
 * Confirm against the NNAPI reference whether these references should read
 * ANeuralNetworksCompilation_finish.
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
350 
/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a
 * machine learning model to a set of inputs.
 *
 * <p>To use:<ul>
 *    <li>Create a new execution instance by calling the
 *        {@link ANeuralNetworksExecution_create} function.</li>
 *    <li>Associate data to the model inputs with
 *        {@link ANeuralNetworksExecution_setInput} or
 *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 *    <li>Associate output buffers to the model outputs with
 *        {@link ANeuralNetworksExecution_setOutput} or
 *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 *    <li>Apply the model with
 *        {@link ANeuralNetworksExecution_startCompute}.</li>
 *    <li>Wait for the execution to complete with
 *        {@link ANeuralNetworksExecution_wait}.</li>
 *    <li>Destroy the execution with
 *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An execution cannot be modified once {@link
 * ANeuralNetworksExecution_start} has been called on it.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_startCompute} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksExecution_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the request after calling {@link
 * ANeuralNetworksRequest_free}.</p>
 *
 * NOTE(review): ANeuralNetworksExecution_wait, ANeuralNetworksExecution_start
 * and ANeuralNetworksRequest_free are not declared in this header (only
 * ANeuralNetworksEvent_wait_fn and ANeuralNetworksExecution_free_fn are) —
 * these look like stale names; confirm against the NNAPI reference.
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
386 
/**
 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
 */
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
  /** The index of the channel dimension. */
  uint32_t channelDim;
  /** The size of the scale array. Should be equal to dimension[channelDim] of
   * the Operand. */
  uint32_t scaleCount;
  /** The array of scaling values for each channel. Each value must be greater
   * than zero. */
  const float* scales;
} ANeuralNetworksSymmPerChannelQuantParams;
400 
/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the
 * latency of a rapid sequence of executions. It will likely cause overhead if
 * only used for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of
 * inferences using {@link ANeuralNetworksExecution} objects. An
 * ANeuralNetworksBurst object and the {@link ANeuralNetworksExecution}
 * objects used with it must all have been created from the same
 * {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may
 * choose to increase the clock frequency of its accelerator for the lifetime
 * of a burst object.
 *
 * <p>To use:<ul>
 *    <li>Create a new burst object by calling the
 *        {@link ANeuralNetworksBurst_create} function.</li>
 *    <li>For each execution:</li><ul>
 *        <li>Create {@link ANeuralNetworksExecution} and configure its
 *            properties (see {@link ANeuralNetworksExecution} for
 *            details).</li>
 *        <li>Apply the model synchronously with
 *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *            {@link ANeuralNetworksBurst} with the new
 *            {@link ANeuralNetworksExecution}.</li>
 *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 *    <li>Destroy the burst with
 *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
433 
/**
 * ANeuralNetworksOperandType describes the type of an operand.
 * This structure is used to describe both scalars and tensors.
 */
typedef struct ANeuralNetworksOperandType {
  /** The data type, e.g. ANEURALNETWORKS_FLOAT32 (the previously cited
   * ANEURALNETWORKS_INT8 is not declared in this header). */
  int32_t type;
  /** The number of dimensions. It should be 0 for scalars. */
  uint32_t dimensionCount;
  /** The dimensions of the tensor. It should be nullptr for scalars. */
  const uint32_t* dimensions;
  /** These two fields are only used for quantized tensors.
   * They should be zero for scalars and non-fixed point tensors.
   * The dequantized value of each entry is (value - zeroPoint) * scale.
   */
  float scale;
  int32_t zeroPoint;
} ANeuralNetworksOperandType;
452 
/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;

// Integer code identifying an operation; takes the ANEURALNETWORKS_* values
// from the operation-type enum above.
typedef int32_t ANeuralNetworksOperationType;

/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the
 * corresponding device, and control which device(s) a model is to be run on.
 *
 * Available since API level 29.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
470 
// nn api function types
//
// Each `*_fn` typedef below matches the signature of the NNAPI C function of
// the same name (without the `_fn` suffix), so the symbols can be resolved at
// runtime from the NNAPI library and invoked through typed function pointers.

typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
    size_t size, int protect, int fd, size_t offset,
    ANeuralNetworksMemory** memory);

typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);

typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);

typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);

typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);

typedef int (*ANeuralNetworksCompilation_create_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);

typedef void (*ANeuralNetworksCompilation_free_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
    ANeuralNetworksCompilation* compilation, int32_t preference);

typedef int (*ANeuralNetworksCompilation_finish_fn)(
    ANeuralNetworksCompilation* compilation);

typedef int (*ANeuralNetworksModel_addOperand_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);

typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* buffer,
    size_t length);

typedef int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksSymmPerChannelQuantParams* channelQuant);

typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
    ANeuralNetworksModel* model, int32_t index,
    const ANeuralNetworksMemory* memory, size_t offset, size_t length);

typedef int (*ANeuralNetworksModel_addOperation_fn)(
    ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
    uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
    const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
    ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
    uint32_t outputCount, const uint32_t* outputs);

typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
    ANeuralNetworksModel* model, bool allow);

typedef int (*ANeuralNetworksExecution_create_fn)(
    ANeuralNetworksCompilation* compilation,
    ANeuralNetworksExecution** execution);

typedef void (*ANeuralNetworksExecution_free_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setInput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_setOutput_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, void* buffer, size_t length);

typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
    ANeuralNetworksExecution* execution, int32_t index,
    const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
    size_t offset, size_t length);

typedef int (*ANeuralNetworksExecution_startCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);

typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);

// Note: ASharedMemory_create comes from the Android NDK sharedmem API (in
// libandroid), not from the NNAPI library; it returns a file descriptor.
typedef int (*ASharedMemory_create_fn)(const char* name, size_t size);

typedef int (*ANeuralNetworks_getDeviceCount_fn)(uint32_t* numDevices);

typedef int (*ANeuralNetworks_getDevice_fn)(uint32_t devIndex,
                                            ANeuralNetworksDevice** device);

typedef int (*ANeuralNetworksDevice_getName_fn)(
    const ANeuralNetworksDevice* device, const char** name);

typedef int (*ANeuralNetworksDevice_getType_fn)(
    const ANeuralNetworksDevice* device, int32_t* type);

typedef int (*ANeuralNetworksDevice_getVersion_fn)(
    const ANeuralNetworksDevice* device, const char** version);

typedef int (*ANeuralNetworksDevice_getFeatureLevel_fn)(
    const ANeuralNetworksDevice* device, int64_t* featureLevel);

typedef int (*ANeuralNetworksModel_getSupportedOperationsForDevices_fn)(
    const ANeuralNetworksModel* model,
    const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
    bool* supportedOps);

typedef int (*ANeuralNetworksCompilation_createForDevices_fn)(
    ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
    uint32_t numDevices, ANeuralNetworksCompilation** compilation);

typedef int (*ANeuralNetworksCompilation_setCaching_fn)(
    ANeuralNetworksCompilation* compilation, const char* cacheDir,
    const uint8_t* token);

typedef int (*ANeuralNetworksCompilation_setTimeout_fn)(
    ANeuralNetworksCompilation* compilation, uint64_t duration);

typedef int (*ANeuralNetworksCompilation_setPriority_fn)(
    ANeuralNetworksCompilation* compilation, int priority);

typedef int (*ANeuralNetworksExecution_compute_fn)(
    ANeuralNetworksExecution* execution);

typedef int (*ANeuralNetworksExecution_setTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_setLoopTimeout_fn)(
    ANeuralNetworksExecution* execution, uint64_t duration);

typedef int (*ANeuralNetworksExecution_getOutputOperandRank_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* rank);

typedef int (*ANeuralNetworksExecution_getOutputOperandDimensions_fn)(
    ANeuralNetworksExecution* execution, int32_t index, uint32_t* dimensions);

typedef int (*ANeuralNetworksBurst_create_fn)(
    ANeuralNetworksCompilation* compilation, ANeuralNetworksBurst** burst);

typedef void (*ANeuralNetworksBurst_free_fn)(ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksExecution_burstCompute_fn)(
    ANeuralNetworksExecution* execution, ANeuralNetworksBurst* burst);

typedef int (*ANeuralNetworksMemory_createFromAHardwareBuffer_fn)(
    const AHardwareBuffer* ahwb, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksExecution_setMeasureTiming_fn)(
    ANeuralNetworksExecution* execution, bool measure);

/**
 * Duration measurements to query via
 * ANeuralNetworksExecution_getDuration_fn.
 */
typedef enum {
  // Execution time on hardware (not driver, which runs on host processor).
  ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
  // Execution time in driver (including time on hardware).  Excludes overhead
  // such as that of the runtime itself and the IPC needed for the runtime to
  // communicate with the driver.
  ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
  // Execution time on hardware, after all dependencies have been signaled.
  // If no dependencies specified (for example, if the execution was scheduled
  // other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
  // Execution time in driver, after all dependencies have been signaled.
  // Excludes overhead such as that of the runtime itself and the IPC needed
  // for the runtime to communicate with the driver.
  // If no dependencies specified (for example, if the execution was scheduled
  // other than with
  // {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
  // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
  // Available since API level 30.
  ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;

typedef int (*ANeuralNetworksExecution_getDuration_fn)(
    const ANeuralNetworksExecution* execution, int32_t durationCode,
    uint64_t* duration);

typedef int (*ANeuralNetworksDevice_getExtensionSupport_fn)(
    const ANeuralNetworksDevice* device, const char* extensionName,
    bool* isExtensionSupported);

typedef int (*ANeuralNetworksModel_getExtensionOperandType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operandCodeWithinExtension, int32_t* type);

typedef int (*ANeuralNetworksModel_getExtensionOperationType_fn)(
    ANeuralNetworksModel* model, const char* extensionName,
    uint16_t operationCodeWithinExtension, ANeuralNetworksOperationType* type);

typedef int (*ANeuralNetworksModel_setOperandExtensionData_fn)(
    ANeuralNetworksModel* model, int32_t index, const void* data,
    size_t length);

typedef int (*ANeuralNetworksMemoryDesc_create_fn)(
    ANeuralNetworksMemoryDesc** desc);

typedef void (*ANeuralNetworksMemoryDesc_free_fn)(
    ANeuralNetworksMemoryDesc* desc);
677 
678 typedef int (*ANeuralNetworksMemoryDesc_addInputRole_fn)(
679     ANeuralNetworksMemoryDesc* desc,
680     const ANeuralNetworksCompilation* compilation, int32_t index,
681     float frequency);
682 
typedef int (*ANeuralNetworksMemoryDesc_addOutputRole_fn)(
    ANeuralNetworksMemoryDesc* desc,
    const ANeuralNetworksCompilation* compilation, uint32_t index,
    float frequency);

typedef int (*ANeuralNetworksMemoryDesc_setDimensions_fn)(
    ANeuralNetworksMemoryDesc* desc, uint32_t rank, const uint32_t* dimensions);

typedef int (*ANeuralNetworksMemoryDesc_finish_fn)(
    ANeuralNetworksMemoryDesc* desc);

typedef int (*ANeuralNetworksMemory_createFromDesc_fn)(
    const ANeuralNetworksMemoryDesc* desc, ANeuralNetworksMemory** memory);

typedef int (*ANeuralNetworksMemory_copy_fn)(const ANeuralNetworksMemory* src,
                                             const ANeuralNetworksMemory* dst);

// Sync-fence interop (API level 30): wrap an existing sync fence fd in an
// event, or extract the fd backing an event.
typedef int (*ANeuralNetworksEvent_createFromSyncFenceFd_fn)(
    int sync_fence_fd, ANeuralNetworksEvent** event);

typedef int (*ANeuralNetworksEvent_getSyncFenceFd_fn)(
    const ANeuralNetworksEvent* event, int* sync_fence_fd);

typedef int (*ANeuralNetworksExecution_startComputeWithDependencies_fn)(
    ANeuralNetworksExecution* execution,
    const ANeuralNetworksEvent* const* dependencies, uint32_t num_dependencies,
    uint64_t duration, ANeuralNetworksEvent** event);
711 #endif  // TENSORFLOW_LITE_NNAPI_NEURALNETWORKSTYPES_H_
712