• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2022 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * NN API types based on NNAPI header file: https://developer.android.com/ndk/reference/group/neural-networks
19  */
20 
21 /**
22  * @addtogroup NeuralNetworks
23  * @{
24  */
25 
26 /**
27  * @file NeuralNetworksTypes.h
28  */
29 
30 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
31 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
32 
33 /******************************************************************
34  *
35  * IMPORTANT NOTICE:
36  *
37  *   This file is part of Android's set of stable system headers
38  *   exposed by the Android NDK (Native Development Kit).
39  *
40  *   Third-party source AND binary code relies on the definitions
41  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
42  *
43  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
44  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
45  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
46  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
47  */
48 
49 #include <stdbool.h>
50 #include <stddef.h>
51 #include <stdint.h>
52 #include <sys/cdefs.h>
53 
54 #ifdef __ANDROID__
55 #include <android/hardware_buffer.h>
56 #endif  // __ANDROID__
57 
58 __BEGIN_DECLS
59 
/**
 * Operand types.
 *
 * The type of an operand in a model.
 *
 * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors
 * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent
 * scalar values and must have no dimensions.
 *
 * Although we define many types, most operators accept just a few
 * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
 * and {@link ANEURALNETWORKS_INT32}.
 *
 * NOTE: This is a stable NDK header; the enumerator names and values below are
 * FROZEN and must never be changed (new 32-bit values may only be appended).
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
  /** A 32 bit floating point scalar value. */
  ANEURALNETWORKS_FLOAT32 = 0,
  /** A signed 32 bit integer scalar value. */
  ANEURALNETWORKS_INT32 = 1,
  /** An unsigned 32 bit integer scalar value. */
  ANEURALNETWORKS_UINT32 = 2,
  /** A tensor of 32 bit floating point values. */
  ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
  /** A tensor of 32 bit integer values. */
  ANEURALNETWORKS_TENSOR_INT32 = 4,
  /**
   * A tensor of 8 bit unsigned integers that represent real numbers.
   *
   * Attached to this tensor are two numbers that can be used to convert the
   * 8 bit integer to the real value and vice versa. These two numbers are:
   * - scale: a 32 bit floating point value greater than zero.
   * - zeroPoint: a 32 bit integer, in range [0, 255].
   *
   * The formula is:
   *   real_value = (integer_value - zeroPoint) * scale.
   */
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
  /**
   * An 8 bit boolean scalar value.
   *
   * Values of this operand type are either true or false. A zero value
   * represents false; any other value represents true.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_BOOL = 6,
  /**
   * A tensor of 16 bit signed integers that represent real numbers.
   *
   * Attached to this tensor is a number representing real value scale that is
   * used to convert the 16 bit number to a real value in the following way:
   * realValue = integerValue * scale.
   *
   * scale is a 32 bit floating point with value greater than zero.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
  /**
   * A tensor of IEEE 754 16 bit floating point values.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
  /**
   * A tensor of 8 bit boolean values.
   *
   * Values of this operand type are either true or false. A zero value
   * represents false; any other value represents true.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_BOOL8 = 9,
  /**
   * An IEEE 754 16 bit floating point scalar value.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_FLOAT16 = 10,
  /**
   * A tensor of 8 bit signed integers that represent real numbers.
   *
   * This tensor is associated with additional fields that can
   * be used to convert the 8 bit signed integer to the real value and vice versa.
   * These fields are:
   * - channelDim: a 32 bit unsigned integer indicating channel dimension.
   * - scales: an array of positive 32 bit floating point values.
   * The size of the scales array must be equal to dimensions[channelDim].
   *
   * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
   * to set the parameters for an Operand of this type.
   *
   * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
   *
   * The formula is:
   * realValue[..., C, ...] =
   *     integerValue[..., C, ...] * scales[C]
   * where C is an index in the Channel dimension.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
  /**
   * A tensor of 16 bit unsigned integers that represent real numbers.
   *
   * Attached to this tensor are two numbers that can be used to convert the
   * 16 bit integer to the real value and vice versa. These two numbers are:
   * - scale: a 32 bit floating point value greater than zero.
   * - zeroPoint: a 32 bit integer, in range [0, 65535].
   *
   * The formula is:
   * real_value = (integer_value - zeroPoint) * scale.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
  /**
   * A tensor of 8 bit signed integers that represent real numbers.
   *
   * Attached to this tensor is a number representing real value scale that is
   * used to convert the 8 bit number to a real value in the following way:
   * realValue = integerValue * scale.
   *
   * scale is a 32 bit floating point with value greater than zero.
   *
   * Available since NNAPI feature level 3.
   */
  ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
  /**
   * A tensor of 8 bit signed integers that represent real numbers.
   *
   * Attached to this tensor are two numbers that can be used to convert the
   * 8 bit integer to the real value and vice versa. These two numbers are:
   * - scale: a 32 bit floating point value greater than zero.
   * - zeroPoint: a 32 bit integer, in range [-128, 127].
   *
   * The formula is:
   * real_value = (integer_value - zeroPoint) * scale.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
  /**
   * A reference to a model.
   *
   * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
   * the value for an Operand of this type.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_MODEL = 15,
} OperandCode;
214 
215 /**
216  * Operation types.
217  *
218  * The type of an operation in a model.
219  *
220  * Available since NNAPI feature level 1.
221  */
222 typedef enum {
223   // Operations below are available since NNAPI feature level 1.
224 
225   /**
226    * Adds two tensors, element-wise.
227    *
228    * Takes two input tensors of identical {@link OperandCode} and compatible
229    * dimensions. The output is the sum of both input tensors, optionally
230    * modified by an activation function.
231    *
232    * Two dimensions are compatible when:
233    *     1. they are equal, or
234    *     2. one of them is 1
235    *
236    * The size of the output is the maximum size along each dimension of the
237    * input operands. It starts with the trailing dimensions, and works its
238    * way forward.
239    *
240    * Example:
241    *
242    *     input1.dimension = {4, 1, 2}
243    *     input2.dimension = {5, 4, 3, 1}
244    *     output.dimension = {5, 4, 3, 2}
245    *
246    * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
247    * dimension is only compatible with 0 or 1. The size of the output
248    * dimension is zero if either of corresponding input dimension is zero.
249    *
250    * Supported tensor {@link OperandCode}:
251    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
252    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
253    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
254    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
255    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
256    *
257    * Supported tensor rank: up to 4
258    *
259    * Inputs:
260    * * 0: A tensor.
261    * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
262    *      as input0.
263    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
264    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
265    *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
266    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
267    *      {@link FuseCode} values. Specifies the activation to
268    *      invoke on the result.
269    *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
270    *      the {@link FuseCode} must be "NONE".
271    *
272    * Outputs:
273    * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
274    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
275    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
276    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
277    *
278    * Available since NNAPI feature level 1.
279    */
280   ANEURALNETWORKS_ADD = 0,
281 
282   /**
283    * Performs a 2-D average pooling operation.
284    *
285    * The output dimensions are functions of the filter dimensions, stride, and
286    * padding.
287    *
288    * The values in the output tensor are computed as:
289    *
290    *     output[b, i, j, channel] =
291    *         sum_{di, dj}(
292    *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
293    *         ) / sum(1)
294    *
295    * Supported tensor {@link OperandCode}:
296    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
297    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
298    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
299    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
300    *
301    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
302    * With the default data layout NHWC, the data is stored in the order of:
303    * [batch, height, width, channels]. Alternatively, the data layout could
304    * be NCHW, the data storage order of: [batch, channels, height, width].
305    * NCHW is supported since NNAPI feature level 3.
306    *
307    * Both explicit padding and implicit padding are supported.
308    *
309    * Inputs (explicit padding):
310    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
311    *      the input.
312    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
313    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
314    *      the left, in the ‘width’ dimension.
315    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
316    *      the right, in the ‘width’ dimension.
317    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
318    *      the top, in the ‘height’ dimension.
319    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
320    *      the bottom, in the ‘height’ dimension.
321    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
322    *      walking through input in the ‘width’ dimension.
323    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
324    *      walking through input in the ‘height’ dimension.
325    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
326    *      width.
327    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
328    *      height.
329    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
330    *      {@link FuseCode} values. Specifies the activation to
331    *      invoke on the result.
332    * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
333    *       Set to true to specify NCHW data layout for input0 and output0.
334    *       Available since NNAPI feature level 3.
335    *
336    * Inputs (implicit padding):
337    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
338    *      the input.
339    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
340    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
341    *      padding scheme, has to be one of the
342    *      {@link PaddingCode} values.
343    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
344    *      walking through input in the ‘width’ dimension.
345    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
346    *      walking through input in the ‘height’ dimension.
347    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
348    *      width.
349    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
350    *      height.
351    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
352    *      {@link FuseCode} values. Specifies the activation to
353    *      invoke on the result.
354    * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
355    *      Set to true to specify NCHW data layout for input0 and output0.
356    *      Available since NNAPI feature level 3.
357    *
358    * Outputs:
359    * * 0: The output 4-D tensor, of shape
360    *      [batches, out_height, out_width, depth].
361    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
362    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
363    *      the scale and zeroPoint must be the same as input0.
364    *
365    * Available since NNAPI feature level 1.
366    */
367   ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
368 
369   /**
370    * Concatenates the input tensors along the given dimension.
371    *
372    * The input tensors must have identical {@link OperandCode} and the same
373    * dimensions except the dimension along the concatenation axis.
374    *
375    * Supported tensor {@link OperandCode}:
376    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
377    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
378    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
379    *   (full support since NNAPI feature level 3, see the input section)
380    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
381    *
382    * Supported tensor rank: up to 4
383    *
384    * Inputs:
385    * * 0 ~ n-1: The list of n input tensors, of shape
386    *            [D0, D1, ..., Daxis(i), ..., Dm].
387    *            Before NNAPI feature level 3, all input tensors of
388    *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
389    *            must have the same scale and zeroPoint as the output tensor.
390    *            Input tensors of
391    *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
392    *            are allowed to have different scale and zeroPoint.
393    *            Since NNAPI feature level 3, zero-sized tensors are supported.
394    * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
395    *      concatenation axis.
396    *
397    * Outputs:
398    * * 0: The output, a tensor of the same {@link OperandCode} as the input
399    *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
400    *      Since NNAPI feature level 3, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
401    *      the scale and zeroPoint values can be different from
402    *      input tensors. Before NNAPI feature level 3 they have to be the same as for the
403    *      input tensors.
404    *
405    * Available since NNAPI feature level 1.
406    */
407   ANEURALNETWORKS_CONCATENATION = 2,
408 
409   /**
410    * Performs a 2-D convolution operation.
411    *
412    * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
413    * batch of images, applying the filter to each window of each image of the
414    * appropriate size.
415    *
416    * The output dimensions are functions of the filter dimensions, stride, and
417    * padding.
418    *
419    * The values in the output tensor are computed as:
420    *
421    *     output[b, i, j, channel] =
422    *         sum_{di, dj, k} (
423    *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
424    *             filter[channel, di, dj, k]
425    *         ) + bias[channel]
426    *
427    * Supported tensor {@link OperandCode} configurations:
428    * * 32 bit floating point:
429    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
430    *
431    * * Quantized:
432    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
433    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
434    * * * input.scale * filter.scale).
435    *
436    * Available since NNAPI feature level 3:
437    * * 16 bit floating point:
438    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
439    *
440    * * Quantized with symmetric per channel quantization for the filter:
441    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
442    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
443    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
444    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
445    *
446    * Available since NNAPI feature level 4:
447    * * Quantized signed (since NNAPI feature level 4):
448    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
449    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
450    * * * input.scale * filter.scale).
451    *
452    * * Quantized signed with filter symmetric per channel quantization
453    *   (since NNAPI feature level 4):
454    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
455    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
456    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
457    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
458    *
459    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
460    * With the default data layout NHWC, the data is stored in the order of:
461    * [batch, height, width, channels]. Alternatively, the data layout could
462    * be NCHW, the data storage order of: [batch, channels, height, width].
463    * NCHW is supported since NNAPI feature level 3.
464    *
465    * Both explicit padding and implicit padding are supported.
466    *
467    * Inputs (explicit padding):
468    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
469    *      specifying the input.
470    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
471    * * 1: A 4-D tensor, of shape
472    *      [depth_out, filter_height, filter_width, depth_in], specifying the
473    *      filter.
474    *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
475    *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
476    *      must be set to 0.
477    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
478    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
479    *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
480    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
481    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
482    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
483    *      of 0 and bias_scale == input_scale * filter_scale.
484    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
485    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
486    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
487    *      bias_scale[i] = input_scale * filter_scale[i].
488    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
489    *      the left, in the ‘width’ dimension.
490    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
491    *      the right, in the ‘width’ dimension.
492    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
493    *      the top, in the ‘height’ dimension.
494    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
495    *      the bottom, in the ‘height’ dimension.
496    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
497    *      walking through input in the ‘width’ dimension.
498    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
499    *      walking through input in the ‘height’ dimension.
500    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
501    *      {@link FuseCode} values. Specifies the activation to
502    *      invoke on the result.
503    * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
504    *      Set to true to specify NCHW data layout for input0 and output0.
505    *      Available since NNAPI feature level 3.
506    * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
507    *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
508    *      cells between each filter element on width dimension. If this input is set,
509    *      input 12 (dilation factor for height) must be specified as well.
510    *      Available since NNAPI feature level 3.
511    * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
512    *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
513    *      cells between each filter element on height dimension. If this input is set,
514    *      input 11 (dilation factor for width) must be specified as well.
515    *      Available since NNAPI feature level 3.
516    *
517    * Inputs (implicit padding):
518    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
519    *      specifying the input.
520    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
521    * * 1: A 4-D tensor, of shape
522    *      [depth_out, filter_height, filter_width, depth_in], specifying the
523    *      filter.
524    *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
525    *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
526    *      must be set to 0.
527    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
528    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
529    *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same
530    *      type.
531    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
532    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
533    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
534    *      of 0 and bias_scale == input_scale * filter_scale.
535    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
536    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
537    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
538    *      bias_scale[i] = input_scale * filter_scale[i].
539    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
540    *      padding scheme, has to be one of the
541    *      {@link PaddingCode} values.
542    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
543    *      walking through input in the ‘width’ dimension.
544    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
545    *      walking through input in the ‘height’ dimension.
546    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
547    *      {@link FuseCode} values. Specifies the activation to
548    *      invoke on the result.
549    * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
550    *      Set to true to specify NCHW data layout for input0 and output0.
551    *      Available since NNAPI feature level 3.
552    * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
553    *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
554    *      cells between each filter element on width dimension. If this input is set,
555    *      input 9 (dilation factor for height) must be specified as well.
556    *      Available since NNAPI feature level 3.
557    * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
558    *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
559    *      cells between each filter element on height dimension. If this input is set,
560    *      input 8 (dilation factor for width) must be specified as well.
561    *      Available since NNAPI feature level 3.
562    *
563    * Outputs:
564    * * 0: The output 4-D tensor, of shape
565    *      [batches, out_height, out_width, depth_out].
566    *      Before NNAPI feature level 3, for output tensor of
567    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition must
568    *      be satisfied: output_scale > input_scale * filter_scale
569    *
570    * Available since NNAPI feature level 1.
571    */
572   ANEURALNETWORKS_CONV_2D = 3,
573 
574   /**
575    * Performs a depthwise 2-D convolution operation.
576    *
577    * Given an input tensor of shape [batches, height, width, depth_in] and a
578    * filter tensor of shape [1, filter_height, filter_width, depth_out]
579    * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
580    * applies a different filter to each input channel (expanding from 1
581    * channel to channel_multiplier channels for each), then concatenates the
582    * results together.
583    *
584    * The output has depth_out = depth_in * depth_multiplier channels.
585    * The output dimensions are functions of the filter dimensions, stride, and
586    * padding.
587    *
588    * The values in the output tensor are computed as:
589    *
590    *     output[b, i, j, k * channel_multiplier + q] =
591    *         sum_{di, dj} (
592    *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
593    *             filter[1, di, dj, k * channel_multiplier + q]
594    *         ) + bias[k * channel_multiplier + q]
595    *
596    * Supported tensor {@link OperandCode} configurations:
597    * * 32 bit floating point:
598    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
599    *
600    * * Quantized:
601    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
602    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
603    * * * input.scale * filter.scale).
604    *
605    * Available since NNAPI feature level 3:
606    * * 16 bit floating point:
607    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
608    *
609    * * Quantized with symmetric per channel quantization for the filter:
610    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
611    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
612    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
613    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
614    *
615    * Available since NNAPI feature level 4:
616    * * Quantized signed (since NNAPI feature level 4):
617    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
618    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
619    * * * input.scale * filter.scale).
620    *
621    * * Quantized signed with filter symmetric per channel quantization
622    *   (since NNAPI feature level 4):
623    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
624    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
625    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
626    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
627    *
628    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
629    * With the default data layout NHWC, the data is stored in the order of:
630    * [batch, height, width, channels]. Alternatively, the data layout could
631    * be NCHW, the data storage order of: [batch, channels, height, width].
632    * NCHW is supported since NNAPI feature level 3.
633    *
634    * Both explicit padding and implicit padding are supported.
635    *
636    * Inputs (explicit padding):
637    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
638    *      specifying the input.
639    * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
640    *      specifying the filter.
641    *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
642    *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
643    *      must be set to 3.
644    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
645    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
646    *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
647    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
648    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
649    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
650    *      of 0 and bias_scale == input_scale * filter_scale.
651    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
652    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
653    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
654    *      bias_scale[i] = input_scale * filter_scale[i].
655    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
656    *      the left, in the ‘width’ dimension.
657    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
658    *      the right, in the ‘width’ dimension.
659    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
660    *      the top, in the ‘height’ dimension.
661    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
662    *      the bottom, in the ‘height’ dimension.
663    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
664    *      walking through input in the ‘width’ dimension.
665    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
666    *      walking through input in the ‘height’ dimension.
667    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
668    *      multiplier.
669    * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
670    *       {@link FuseCode} values. Specifies the activation to
671    *       invoke on the result.
672    * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
673    *       Set to true to specify NCHW data layout for input0 and output0.
674    *       Available since NNAPI feature level 3.
675    * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
676    *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
677    *      cells between each filter element on width dimension. If this input is set,
678    *      input 13 (dilation factor for height) must be specified as well.
679    *      Available since NNAPI feature level 3.
680    * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
681    *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
682    *      cells between each filter element on height dimension. If this input is set,
683    *      input 12 (dilation factor for width) must be specified as well.
684    *      Available since NNAPI feature level 3.
685    *
686    * Inputs (implicit padding):
687    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
688    *      specifying the input.
689    * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
690    *      specifying the filter.
691    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
692    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
693    *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
694    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
695    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
696    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
697    *      of 0 and bias_scale == input_scale * filter_scale.
698    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
699    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
700    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
701    *      bias_scale[i] = input_scale * filter_scale[i].
702    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
703    *      padding scheme, has to be one of the
704    *      {@link PaddingCode} values.
705    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
706    *      walking through input in the ‘width’ dimension.
707    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
708    *      walking through input in the ‘height’ dimension.
709    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
710    *      multiplier.
711    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
712    *      {@link FuseCode} values. Specifies the activation to
713    *      invoke on the result.
714    * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
715    *      Set to true to specify NCHW data layout for input0 and output0.
716    *      Available since NNAPI feature level 3.
717    * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
718    *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
719    *      cells between each filter element on width dimension. If this input is set,
720    *      input 10 (dilation factor for height) must be specified as well.
721    *      Available since NNAPI feature level 3.
722    * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
723    *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
724    *      cells between each filter element on height dimension. If this input is set,
725    *      input 9 (dilation factor for width) must be specified as well.
726    *      Available since NNAPI feature level 3.
727    *
728    * Outputs:
729    * * 0: The output 4-D tensor, of shape
730    *      [batches, out_height, out_width, depth_out]. Before NNAPI feature level 3, for
731    *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
732    *      the following condition must be satisfied:
733    *      output_scale > input_scale * filter_scale
734    *
735    * Available since NNAPI feature level 1.
736    */
737   ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
738 
739   /**
740    * Rearranges data from depth into blocks of spatial data.
741    *
742    * More specifically, this op outputs a copy of the input tensor where
743    * values from the depth dimension are moved in spatial blocks to the height
744    * and width dimensions. The value block_size indicates the input block size
745    * and how the data is moved.
746    *
747    * Chunks of data of size block_size * block_size from depth are rearranged
748    * into non-overlapping blocks of size block_size x block_size.
749    *
750    * The width of the output tensor is input_width * block_size, whereas the
751    * height is input_height * block_size. The depth of the input tensor must
752    * be divisible by block_size * block_size.
753    *
754    * Supported tensor {@link OperandCode}:
755    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
756    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
757    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
758    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
759    *
760    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
761    * With the default data layout NHWC, the data is stored in the order of:
762    * [batch, height, width, channels]. Alternatively, the data layout could
763    * be NCHW, the data storage order of: [batch, channels, height, width].
764    * NCHW is supported since NNAPI feature level 3.
765    *
766    * Inputs:
767    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
768    *      specifying the input.
769    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
770    *      block_size must be >=1 and block_size * block_size must be a divisor
771    *      of the input depth.
772    * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
773    *      Set to true to specify NCHW data layout for input0 and output0.
774    *      Available since NNAPI feature level 3.
775    *
776    * Outputs:
777    * * 0: The output 4-D tensor, of shape [batch, height*block_size,
778    *      width*block_size, depth/(block_size*block_size)].
779    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
780    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
781    *      the scale and zeroPoint must be the same as input0.
782    *
783    * Available since NNAPI feature level 1.
784    */
785   ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
786 
787   /**
788    * Dequantizes the input tensor.
789    *
790    * The formula is:
791    *
792    *     output = (input - zeroPoint) * scale.
793    *
794    * Supported input tensor {@link OperandCode}:
795    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
796    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since NNAPI feature level 3)
797    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since NNAPI feature level 3)
798    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
799    *
800    * Supported output tensor {@link OperandCode}:
801    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
802    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
803    *
804    * Supported tensor rank: up to 4
805    *
806    * Inputs:
807    * * 0: A tensor.
808    *      Since NNAPI feature level 3, this tensor may be zero-sized.
809    *
810    * Outputs:
811    * * 0: A tensor with the same shape as input0.
812    *
813    * Available since NNAPI feature level 1.
814    */
815   ANEURALNETWORKS_DEQUANTIZE = 6,
816 
817   /**
818    * Looks up sub-tensors in the input tensor.
819    *
820    * This operator takes for input a tensor of values (Values) and
821    * a one-dimensional tensor of selection indices (Lookups).
822    * The output tensor is the concatenation of sub-tensors of Values as
823    * selected by Lookups.
824    *
825    * Think of Values as being sliced along its first dimension:
826    * The entries in Lookups select which slices are concatenated together
827    * to create the output tensor.
828    *
829    * For example, if Values has shape of [40, 200, 300] and
830    * Lookups has shape of [3], all three values found in Lookups are
831    * expected to be between 0 and 39. The resulting tensor must
832    * have shape of [3, 200, 300].
833    *
834    * If a value in Lookups is out of bounds, the operation must fail
835    * and an error must be reported.
836    *
837    * Supported value tensor {@link OperandCode}:
838    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 4)
839    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
840    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 3)
841    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
842    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
843    *
844    * Supported value tensor rank: from 2
845    *
846    * Inputs:
847    * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
848    *      The values are indices into the first dimension of Values.
849    * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
850    *      extracted.
851    *
852    * Output:
853    * * 0: A n-D tensor with the same rank and shape as the Values
854    *      tensor, except for the first dimension which has the same size
855    *      as Lookups' only dimension.
856    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
857    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
858    *      the scale and zeroPoint must be the same as input1.
859    *
860    * Available since NNAPI feature level 1.
861    */
862   ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
863 
864   /**
865    * Computes element-wise floor() on the input tensor.
866    *
867    * Supported tensor {@link OperandCode}:
868    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
869    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
870    *
871    * Supported tensor rank: up to 4
872    *
873    * Inputs:
874    * * 0: A tensor.
875    *
876    * Outputs:
877    * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
878    *      the input tensor.
879    *
880    * Available since NNAPI feature level 1.
881    */
882   ANEURALNETWORKS_FLOOR = 8,
883 
884   /**
885    * Denotes a fully (densely) connected layer, which connects all elements
886    * in the input tensor with each element in the output tensor.
887    *
888    * This layer implements the operation:
889    *
890    *     outputs = activation(inputs * weights’ + bias)
891    *
892    * Supported tensor {@link OperandCode}:
893    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
894    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
895    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
896    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
897    *
898    * Supported tensor rank: up to 4.
899    *
900    * Inputs:
901    * * 0: A tensor of at least rank 2, specifying the input. If rank is
902    *      greater than 2, then it gets flattened to a 2-D Tensor. The
903    *      (flattened) 2-D Tensor is reshaped (if necessary) to
904    *      [batch_size, input_size], where "input_size" corresponds to the
905    *      number of inputs to the layer, matching the second dimension of
906    *      weights, and "batch_size" is calculated by dividing the number of
907    *      elements by "input_size".
908    *      Since NNAPI feature level 3, zero batch_size is supported for this tensor.
909    * * 1: A 2-D tensor, specifying the weights, of shape
910    *      [num_units, input_size], where "num_units" corresponds to the number
911    *      of output nodes.
912    * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
913    *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
914    *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
915    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
916    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
917    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
918    *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
919    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
920    *      {@link FuseCode} values. Specifies the activation to
921    *      invoke on the result.
922    *
923    * Outputs:
924    * * 0: The output tensor, of shape [batch_size, num_units]. Before NNAPI feature level 3, for
925    *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
926    *      condition must be satisfied: output_scale > input_scale * filter_scale.
927    *
928    * Available since NNAPI feature level 1.
929    */
930   ANEURALNETWORKS_FULLY_CONNECTED = 9,
931 
932   /**
933    * Looks up sub-tensors in the input tensor using a key-value map.
934    *
935    * This operator takes for input a tensor of values (Values),
936    * a one-dimensional tensor of selection values (Lookups) and
937    * a one-dimensional tensor that maps these values to Values
938    * indexes. The output tensor is the concatenation of sub-tensors of
939    * Values as selected by Lookups via Keys.
940    *
941    * Think of Values as being sliced along its outer-most dimension.
942    * The output is a concatenation of selected slices, with one slice
943    * for each entry of Lookups. The slice selected is the one at the
944    * same index as the Maps entry that matches the value in Lookups.
945    *
946    * For a hit, the corresponding sub-tensor of Values is included
947    * in the Output tensor. For a miss, the corresponding sub-tensor in
948    * Output must have zero values.
949    *
950    * For example, if Values has shape of [40, 200, 300],
951    * Keys should have a shape of [40]. If Lookups tensor has shape
952    * of [3], three slices are being concatenated, so the resulting tensor
953    * must have the shape of [3, 200, 300]. If the first entry in Lookups
954    * has the value 123456, that value must be located in Keys tensor.
955    * If the sixth entry of Keys contains 123456, the sixth slice of Values
956    * must be selected. If no entry in Keys has 123456, a slice of zeroes
957    * must be concatenated.
958    *
959    * Supported value tensor {@link OperandCode}:
960    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
961    * * {@link ANEURALNETWORKS_TENSOR_INT32}
962    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
963    *
964    * Supported value tensor rank: from 2
965    *
966    * Inputs:
967    * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
968    *      shape [ k ].
969    * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
970    *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
971    *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
972    *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
973    *      ascending order.
974    * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
975    *      must be n.
976    *
977    * Outputs:
978    * * 0: Output. A tensor with shape [ k …].
979    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
980    *      the scale and zeroPoint must be the same as input2.
981    * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
982    *      hits (True) or not (False).
983    *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
984    *      and scale 1.0f.
985    *      A non-zero byte represents True, a hit. A zero indicates otherwise.
986    *
987    * Available since NNAPI feature level 1.
988    */
989   ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
990 
991   /**
992    * Applies L2 normalization along the axis dimension.
993    *
994    * The values in the output tensor are computed as:
995    *
996    *     output[batch, row, col, channel] =
997    *         input[batch, row, col, channel] /
998    *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
999    *
1000    * By default the axis dimension is the last dimension of the input tensor.
1001    *
1002    * Supported tensor {@link OperandCode}:
1003    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1004    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1005    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1006    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1007    *
1008    * Supported tensor rank: up to 4
1009    * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1010    *
1011    * Inputs:
1012    * * 0: An n-D tensor, specifying the tensor to be normalized.
1013    * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1014    *      specifying the dimension normalization would be performed on.
1015    *      Negative index is used to specify axis from the end (e.g. -1 for
1016    *      the last axis). Must be in the range [-n, n).
1017    *      Available since NNAPI feature level 3.
1018    *
1019    * Outputs:
1020    * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
1021    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1022    *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1023    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1024    *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1025    *
1026    *      NOTE: Before NNAPI feature level 4, if the elements along an axis are all zeros,
1027    *      the result is undefined. Since NNAPI feature level 4, if the elements along an axis
1028    *      are all zeros, the result is logical zero.
1029    *
1030    * Available since NNAPI feature level 1.
1031    */
1032   ANEURALNETWORKS_L2_NORMALIZATION = 11,
1033 
1034   /**
1035    * Performs a 2-D L2 pooling operation.
1036    *
1037    * The output dimensions are functions of the filter dimensions, stride, and
1038    * padding.
1039    *
1040    * The values in the output tensor are computed as:
1041    *
1042    *     output[b, i, j, c] =
1043    *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
1044    *              sum(1))
1045    *
1046    * Supported tensor {@link OperandCode}:
1047    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1048    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1049    *
1050    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1051    * With the default data layout NHWC, the data is stored in the order of:
1052    * [batch, height, width, channels]. Alternatively, the data layout could
1053    * be NCHW, the data storage order of: [batch, channels, height, width].
1054    * NCHW is supported since NNAPI feature level 3.
1055    *
1056    * Both explicit padding and implicit padding are supported.
1057    *
1058    * Inputs (explicit padding):
1059    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1060    *      the input.
1061    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1062    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1063    *      the left, in the ‘width’ dimension.
1064    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1065    *      the right, in the ‘width’ dimension.
1066    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1067    *      the top, in the ‘height’ dimension.
1068    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1069    *      the bottom, in the ‘height’ dimension.
1070    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1071    *      walking through input in the ‘width’ dimension.
1072    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1073    *      walking through input in the ‘height’ dimension.
1074    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1075    *      width.
1076    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1077    *      height.
1078    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1079    *      {@link FuseCode} values. Specifies the activation to
1080    *      invoke on the result.
1081    * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1082    *       Set to true to specify NCHW data layout for input0 and output0.
1083    *       Available since NNAPI feature level 3.
1084    *
1085    * Inputs (implicit padding):
1086    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1087    *      the input.
1088    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1089    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1090    *      padding scheme, has to be one of the
1091    *      {@link PaddingCode} values.
1092    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1093    *      walking through input in the ‘width’ dimension.
1094    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1095    *      walking through input in the ‘height’ dimension.
1096    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1097    *      width.
1098    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1099    *      height.
1100    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1101    *      {@link FuseCode} values. Specifies the activation to
1102    *      invoke on the result.
1103    * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1104    *      Set to true to specify NCHW data layout for input0 and output0.
1105    *      Available since NNAPI feature level 3.
1106    *
1107    * Outputs:
1108    * * 0: The output 4-D tensor, of shape
1109    *      [batches, out_height, out_width, depth].
1110    *
1111    * Available since NNAPI feature level 1.
1112    */
1113   ANEURALNETWORKS_L2_POOL_2D = 12,
1114 
1115   /**
1116    * Applies Local Response Normalization along the depth dimension.
1117    *
1118    * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
1119    * last dimension), and each vector is normalized independently. Within a
1120    * given vector, each component is divided by the weighted, squared sum of
1121    * inputs within depth_radius.
1122    *
1123    * The output is calculated using this formula:
1124    *
1125    *     sqr_sum[a, b, c, d] = sum(
1126    *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
1127    *     output = input / pow((bias + alpha * sqr_sum), beta)
1128    *
1129    * For input tensor with rank less than 4, independently normalizes each
1130    * 1-D slice along specified dimension.
1131    *
1132    * Supported tensor {@link OperandCode}:
1133    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1134    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1135    *
1136    * Supported tensor rank: up to 4
1137    * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1138    *
1139    * Inputs:
1140    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1141    *      the input.
1142    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
1143    *      the normalization window.
1144    * * 2: A scalar, specifying the bias, must not be zero.
1145    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias
1146    *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1147    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
1148    *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1149    * * 3: A scalar, specifying the scale factor, alpha.
1150    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
1151    *      alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
1152    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
1153    *      alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
1154    * * 4: A scalar, specifying the exponent, beta.
1155    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
1156    *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1157    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
1158    *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1159    * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1160    *      specifying the dimension normalization would be performed on.
1161    *      Negative index is used to specify axis from the end (e.g. -1 for
1162    *      the last axis). Must be in the range [-n, n).
1163    *      Available since NNAPI feature level 3.
1164    *
1165    * Outputs:
1166    * * 0: The output tensor of same shape as input0.
1167    *
1168    * Available since NNAPI feature level 1.
1169    */
1170   ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
1171 
1172   /**
1173    * Computes sigmoid activation on the input tensor element-wise.
1174    *
1175    * The output is calculated using this formula:
1176    *
1177    *     output = 1 / (1 + exp(-input))
1178    *
1179    * Supported tensor {@link OperandCode}:
1180    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1181    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1182    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1183    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1184    *
1185    * Supported tensor rank: up to 4.
1186    *
1187    * Inputs:
1188    * * 0: A tensor, specifying the input.
1189    *      Since NNAPI feature level 3, this tensor may be zero-sized.
1190    *
1191    * Outputs:
1192    * * 0: The output tensor of same shape as input0.
1193    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1194    *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1195    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1196    *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1197    *
1198    * Available since NNAPI feature level 1.
1199    */
1200   ANEURALNETWORKS_LOGISTIC = 14,
1201 
1202   /**
1203    * Projects an input to a bit vector via locality sensitive hashing.
1204    *
1205    * Supported input tensor {@link OperandCode}:
1206    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1207    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1208    * * {@link ANEURALNETWORKS_TENSOR_INT32}
1209    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1210    *
1211    * Supported input tensor rank: from 1
1212    *
1213    * Inputs:
1214    * * 0: Hash functions. Dim.size == 2, DataType: Float.
1215    *      Tensor[0].Dim[0]: Number of hash functions.
1216    *      Tensor[0].Dim[1]: Number of projected output bits generated by each
1217    *      hash function.
1218    *      If the projection type is Sparse:
1219    *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
1220    *
1221    * * 1: Input. Dim.size >= 1, no restriction on DataType.
1222    * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
1223    *      If not set, each input element is considered to have the same weight
1224    *      of 1.0.
1225    *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
1226    * * 3: Type:
1227    *        Sparse:
1228    *          Value LSHProjectionType_SPARSE(=3) (since NNAPI feature level 3).
1229    *          Computed bit vector is considered to be sparse.
1230    *          Each output element is an int32 made up of multiple bits
1231    *          computed from hash functions.
1232    *
1233    *          NOTE: To avoid collisions across hash functions, an offset value
1234    *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
1235    *          where k is the index of the hash function.
1236    *
1237    *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1238    *          Legacy behavior that does not include the offset value.
1239    *
1240    *        Dense:
1241    *          Value LSHProjectionType_DENSE(=2).
1242    *          Computed bit vector is considered to be dense. Each output
1243    *          element represents a bit and can take the value of either
1244    *          0 or 1.
1245    *
1246    * Outputs:
1247    * * 0: If the projection type is Sparse:
1248    *      Output.Dim == { Tensor[0].Dim[0] }
1249    *      A tensor of int32 that represents hash signatures.
1250    *
1251    *      If the projection type is Dense:
1252    *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1253    *      A flattened tensor that represents projected bit vectors.
1254    *
1255    * Available since NNAPI feature level 1.
1256    * The offset value for sparse projections was added in NNAPI feature level 3.
1257    */
1258   ANEURALNETWORKS_LSH_PROJECTION = 15,
1259 
1260   /**
1261    * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1262    *
1263    * The LSTM operation is described by the following equations.
1264    *
1265    * \f{eqnarray*}{
1266    * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1267    * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1268    * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1269    *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1270    * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1271    *      & & \\
1272    *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1273    *      & if\ there\ is\ a\ projection; \\
1274    * h_t =& & \\
1275    *      & o_t \odot g(C_t) & otherwise. \\
1276    * \f}
1277    * Where:
1278    * * \f$x_t\f$ is the input,
1279    * * \f$i_t\f$ is the input gate,
1280    * * \f$f_t\f$ is the forget gate,
1281    * * \f$C_t\f$ is the cell state,
1282    * * \f$o_t\f$ is the output,
1283    * * \f$h_t\f$ is the output state,
1284    * * \f$\sigma\f$ is the logistic sigmoid function,
1285    * * \f$g\f$ is the cell input and cell output activation function, usually
1286    *   \f$tanh\f$,
1287    * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1288    * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1289    * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1290    * * \f$b_i\f$ is the input gate bias,
1291    * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1292    * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1293    * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1294    * * \f$b_f\f$ is the forget gate bias,
1295    * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1296    * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1297    * * \f$b_c\f$ is the cell bias,
1298    * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1299    * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1300    * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1301    * * \f$b_o\f$ is the output gate bias,
1302    * * \f$W_{proj}\f$ is the projection weight matrix,
1303    * * \f$b_{proj}\f$ is the projection bias,
1304    * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1305    * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1306    * * \f$\odot\f$ is the
1307    *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1308    *   Hadamard product</a> that takes two matrices and produces another
1309    *   matrix, each element of which is the product of the corresponding
1310    *   elements of the input matrices.
1311    *
1312    * Since NNAPI feature level 3 LSTM supports layer normalization.
1313    * In case layer normalization is used, the inputs to internal activation
1314    * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1315    * following an approach from section 3.1 from
1316    * https://arxiv.org/pdf/1607.06450.pdf
1317    *
1318    * The operation has the following independently optional inputs:
1319    * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1320    *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1321    *   have values or none of them have values (i.e., all set to null). If
1322    *   they have values, the peephole optimization is used.
1323    * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1324    *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1325    *   or none of them have values. If they have no values, coupling of input
1326    *   and forget gates (CIFG) is used, in which case the input gate
1327    *   (\f$i_t\f$) is calculated using the following equation instead.
1328    *   \f{eqnarray*}{
1329    *   i_t = 1 - f_t
1330    *   \f}
1331    *   In case peephole optimization is used and CIFG is not used
1332    *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1333    *   cell-to-input weights must have no value.
1334    * * The projection weights (\f$W_{proj}\f$) is required only for the
1335    *   recurrent projection layer, and should otherwise have no value.
1336    * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1337    *   value if the recurrent projection layer exists, and should otherwise
1338    *   have no value.
1339    * * (NNAPI feature level 3 or later) The four layer normalization weights either all have
1340    *   values or none of them have values. Additionally, if CIFG is used,
1341    *   input layer normalization weights tensor is omitted and the other layer
1342    *   normalization weights either all have values or none of them have
1343    *   values. Layer normalization is used when the values of all the layer
1344    *   normalization weights are present.
1345    *
1346    * References:
1347    *
1348    * The default non-peephole non-CIFG implementation is based on:
1349    * http://www.bioinf.jku.at/publications/older/2604.pdf
1350    * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1351    * Computation, 9(8):1735-1780, 1997.
1352    *
1353    * The peephole implementation and projection layer is based on:
1354    * https://research.google.com/pubs/archive/43905.pdf
1355    * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1356    * recurrent neural network architectures for large scale acoustic
1357    * modeling." INTERSPEECH, 2014.
1358    * (However, the concept of peephole optimization was introduced in work
1359    * prior to this paper.)
1360    *
1361    * The coupling of input and forget gate (CIFG) is based on:
1362    * http://arxiv.org/pdf/1503.04069.pdf
1363    * Greff et al. "LSTM: A Search Space Odyssey"
1364    *
1365    * The layer normalization is based on:
1366    * https://arxiv.org/pdf/1607.06450.pdf
1367    * Jimmy Ba et al. "Layer Normalization"
1368    *
1369    * Supported tensor {@link OperandCode}:
1370    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1371    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1372    *
1373    * All input and output tensors must be of the same type.
1374    *
1375    * Inputs:
1376    * * 0: The input (\f$x_t\f$).
1377    *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1378    *      corresponds to the batching dimension, and “input_size” is the size
1379    *      of the input.
1380    * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1381    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1382    *      corresponds to the number of cell units.
1383    * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1384    *      A 2-D tensor of shape [num_units, input_size].
1385    * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1386    *      A 2-D tensor of shape [num_units, input_size].
1387    * * 4: The input-to-output weights (\f$W_{xo}\f$).
1388    *      A 2-D tensor of shape [num_units, input_size].
1389    * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1390    *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1391    *      corresponds to either the number of cell units (i.e., “num_units”),
1392    *      or the second dimension of the “projection_weights”, if defined.
1393    * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1394    *      A 2-D tensor of shape [num_units, output_size].
1395    * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1396    *      A 2-D tensor of shape [num_units, output_size].
1397    * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1398    *      A 2-D tensor of shape [num_units, output_size].
1399    * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1400    *      A 1-D tensor of shape [num_units].
1401    * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1402    *      A 1-D tensor of shape [num_units].
1403    * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1404    *      A 1-D tensor of shape [num_units].
1405    * * 12:The input gate bias (\f$b_i\f$). Optional.
1406    *      A 1-D tensor of shape [num_units].
1407    * * 13:The forget gate bias (\f$b_f\f$).
1408    *      A 1-D tensor of shape [num_units].
1409    * * 14:The cell bias (\f$b_c\f$).
1410    *      A 1-D tensor of shape [num_units].
1411    * * 15:The output gate bias (\f$b_o\f$).
1412    *      A 1-D tensor of shape [num_units].
1413    * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1414    *      A 2-D tensor of shape [output_size, num_units].
1415    * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1416    *      A 1-D tensor of shape [output_size].
1417    * * 18:The output state (in) (\f$h_{t-1}\f$).
1418    *      A 2-D tensor of shape [batch_size, output_size].
1419    * * 19:The cell state (in) (\f$C_{t-1}\f$).
1420    *      A 2-D tensor of shape [batch_size, num_units].
1421    * * 20:The activation function (\f$g\f$).
1422    *      A value indicating the activation function:
1423    *      <ul>
1424    *      <li>0: None;
1425    *      <li>1: Relu;
1426    *      <li>3: Relu6;
1427    *      <li>4: Tanh;
1428    *      <li>6: Sigmoid.
1429    *      </ul>
1430    * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1431    *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1432    *      then clipping is disabled.
1433    *      Until NNAPI feature level 3 this scalar must be of type {@link
1434    *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1435    *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1436    *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1437    *      otherwise if all the input tensors have the type {@link
1438    *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1439    *      ANEURALNETWORKS_FLOAT16}.
1440    * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1441    *      projection layer, such that values are bound within
1442    *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1443    *      Until NNAPI feature level 3 this scalar must be of type {@link
1444    *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1445    *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1446    *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1447    *      otherwise if all the input tensors have the type {@link
1448    *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1449    *      ANEURALNETWORKS_FLOAT16}.
1450    * Since NNAPI feature level 3 there are additional inputs to this op:
1451    * * 23:The input layer normalization weights.
1452    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1453    *      to activation at input gate.
1454    * * 24:The forget layer normalization weights.
1455    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1456    *      to activation at forget gate.
1457    * * 25:The cell layer normalization weights.
1458    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1459    *      to activation at cell gate.
1460    * * 26:The output layer normalization weights.
1461    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1462    *      to activation at output gate.
1463    *
1464    * Outputs:
1465    * * 0: The scratch buffer.
1466    *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1467    *      [batch_size, num_units * 4] without CIFG.
1468    * * 1: The output state (out) (\f$h_t\f$).
1469    *      A 2-D tensor of shape [batch_size, output_size].
1470    * * 2: The cell state (out) (\f$C_t\f$).
1471    *      A 2-D tensor of shape [batch_size, num_units].
1472    * * 3: The output (\f$o_t\f$).
1473    *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1474    *      the same as the current “output state (out)” value.
1475    *
1476    * Available since NNAPI feature level 1.
1477    */
1478   ANEURALNETWORKS_LSTM = 16,
1479 
1480   /**
1481    * Performs a 2-D max pooling operation.
1482    *
1483    * The output dimensions are functions of the filter dimensions, stride, and
1484    * padding.
1485    *
1486    * The values in the output tensor are computed as:
1487    *
1488    *     output[b, i, j, channel] =
1489    *         max_{di, dj} (
1490    *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1491    *         )
1492    *
1493    * Supported tensor {@link OperandCode}:
1494    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1495    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1496    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1497    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1498    *
1499    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1500    * With the default data layout NHWC, the data is stored in the order of:
1501    * [batch, height, width, channels]. Alternatively, the data layout could
1502    * be NCHW, the data storage order of: [batch, channels, height, width].
1503    * NCHW is supported since NNAPI feature level 3.
1504    *
1505    * Both explicit padding and implicit padding are supported.
1506    *
1507    * Inputs (explicit padding):
1508    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1509    *      the input.
1510    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1511    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1512    *      the left, in the ‘width’ dimension.
1513    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1514    *      the right, in the ‘width’ dimension.
1515    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1516    *      the top, in the ‘height’ dimension.
1517    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1518    *      the bottom, in the ‘height’ dimension.
1519    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1520    *      walking through input in the ‘width’ dimension.
1521    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1522    *      walking through input in the ‘height’ dimension.
1523    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1524    *      width.
1525    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1526    *      height.
1527    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1528    *      {@link FuseCode} values. Specifies the activation to
1529    *      invoke on the result.
1530    * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1531    *       Set to true to specify NCHW data layout for input0 and output0.
1532    *       Available since NNAPI feature level 3.
1533    *
1534    * Inputs (implicit padding):
1535    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1536    *      the input.
1537    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1538    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1539    *      padding scheme, has to be one of the
1540    *      {@link PaddingCode} values.
1541    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1542    *      walking through input in the ‘width’ dimension.
1543    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1544    *      walking through input in the ‘height’ dimension.
1545    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1546    *      width.
1547    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1548    *      height.
1549    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1550    *      {@link FuseCode} values. Specifies the activation to
1551    *      invoke on the result.
1552    * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1553    *      Set to true to specify NCHW data layout for input0 and output0.
1554    *      Available since NNAPI feature level 3.
1555    *
1556    * Outputs:
1557    * * 0: The output 4-D tensor, of shape
1558    *      [batches, out_height, out_width, depth].
1559    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1560    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1561    *      the scale and zeroPoint must be the same as input0.
1562    *
1563    * Available since NNAPI feature level 1.
1564    */
1565   ANEURALNETWORKS_MAX_POOL_2D = 17,
1566 
1567   /**
1568    * Multiplies two tensors, element-wise.
1569    *
1570    * Takes two input tensors of identical {@link OperandCode} and compatible
1571    * dimensions. The output is the product of both input tensors, optionally
1572    * modified by an activation function.
1573    *
1574    * Two dimensions are compatible when:
1575    *     1. they are equal, or
1576    *     2. one of them is 1
1577    *
1578    * The size of the resulting output is the maximum size along each dimension
1579    * of the input operands. It starts with the trailing dimensions, and works
1580    * its way forward.
1581    *
1582    * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
1583    * dimension is only compatible with 0 or 1. The size of the output
1584    * dimension is zero if either of corresponding input dimension is zero.
1585    *
1586    * Supported tensor {@link OperandCode}:
1587    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1588    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1589    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1590    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1591    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
1592    *
1593    * Supported tensor rank: up to 4
1594    *
1595    * Inputs:
1596    * * 0: A tensor.
1597    * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1598    *      as input0.
1599    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1600    *      {@link FuseCode} values. Specifies the activation to
1601    *      invoke on the result.
1602    *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
1603    *      the {@link FuseCode} must be "NONE".
1604    *
1605    * Outputs:
1606    * * 0: The product, a tensor of the same {@link OperandCode} as input0.
1607    *      For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1608    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1609    *      the following condition must be satisfied:
1610    *      output_scale > input1_scale * input2_scale.
1611    *
1612    * Available since NNAPI feature level 1.
1613    */
1614   ANEURALNETWORKS_MUL = 18,
1615 
1616   /**
1617    * Computes rectified linear activation on the input tensor element-wise.
1618    *
1619    * The output is calculated using this formula:
1620    *
1621    *     output = max(0, input)
1622    *
1623    * Supported tensor {@link OperandCode}:
1624    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1625    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1626    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1627    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1628    *
1629    * Supported tensor rank: up to 4.
1630    *
1631    * Inputs:
1632    * * 0: A tensor, specifying the input.
1633    *      Since NNAPI feature level 3, this tensor may be zero-sized.
1634    *
1635    * Outputs:
1636    * * 0: The output tensor of same shape as input0.
1637    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1638    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1639    *      the scale and zeroPoint must be the same as input0.
1640    *
1641    * Available since NNAPI feature level 1.
1642    */
1643   ANEURALNETWORKS_RELU = 19,
1644 
1645   /**
1646    * Computes rectified linear 1 activation on the input tensor element-wise.
1647    *
1648    * The output is calculated using this formula:
1649    *
1650    *     output = min(1.f, max(-1.f, input))
1651    *
1652    * Supported tensor {@link OperandCode}:
1653    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1654    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1655    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1656    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1657    *
1658    * Supported tensor rank: up to 4.
1659    *
1660    * Inputs:
1661    * * 0: A tensor, specifying the input.
1662    *      Since NNAPI feature level 3, this tensor may be zero-sized.
1663    *
1664    * Outputs:
1665    * * 0: The output tensor of the same shape as input0.
1666    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1667    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1668    *      the scale and zeroPoint must be the same as input0.
1669    *
1670    * Available since NNAPI feature level 1.
1671    */
1672   ANEURALNETWORKS_RELU1 = 20,
1673 
1674   /**
1675    * Computes rectified linear 6 activation on the input tensor element-wise.
1676    *
1677    * The output is calculated using this formula:
1678    *
1679    *     output = min(6, max(0, input))
1680    *
1681    * Supported tensor {@link OperandCode}:
1682    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1683    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1684    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1685    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1686    *
1687    * Supported tensor rank: up to 4.
1688    *
1689    * Inputs:
1690    * * 0: A tensor, specifying the input.
1691    *      Since NNAPI feature level 3, this tensor may be zero-sized.
1692    *
1693    * Outputs:
1694    * * 0: The output tensor of same shape as input0.
1695    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1696    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1697    *      the scale and zeroPoint must be the same as input0.
1698    *
1699    * Available since NNAPI feature level 1.
1700    */
1701   ANEURALNETWORKS_RELU6 = 21,
1702 
1703   /**
1704    * Reshapes a tensor.
1705    *
1706    * Given tensor, this operation returns a tensor that has the same values as
1707    * tensor, but with a newly specified shape.
1708    *
1709    * Supported tensor {@link OperandCode}:
1710    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1711    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1712    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1713    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1714    *
1715    * Supported tensor rank: up to 4.
1716    *
1717    * Inputs:
1718    * * 0: A tensor, specifying the tensor to be reshaped.
1719    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1720    *      shape of the output tensor. The number of elements implied by shape
1721    *      must be the same as the number of elements in the input tensor.
1722    *
1723    *      If one component of shape is the special value -1, the size of that
1724    *      dimension is computed so that the total size remains constant. In
1725    *      particular, a shape of [-1] flattens into 1-D. At most one component
1726    *      of shape can be -1.
1727    *
1728    * Outputs:
1729    * * 0: The output tensor, of shape specified by the input shape.
1730    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1731    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1732    *      the scale and zeroPoint must be the same as input0.
1733    *
1734    * Available since NNAPI feature level 1.
1735    */
1736   ANEURALNETWORKS_RESHAPE = 22,
1737 
1738   /**
1739    * Resizes images to given size using bilinear interpolation.
1740    *
1741    * Resized images will be distorted if their output aspect ratio is not the
1742    * same as the input aspect ratio. The corner pixels of the output may not
1743    * be the same as the corner pixels of the input.
1744    *
1745    * Supported tensor {@link OperandCode}:
1746    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1747    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1748    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1749    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1750    *
1751    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1752    * With the default data layout NHWC, the data is stored in the order of:
1753    * [batch, height, width, channels]. Alternatively, the data layout could
1754    * be NCHW, the data storage order of: [batch, channels, height, width].
1755    * NCHW is supported since NNAPI feature level 3.
1756    *
1757    * Both resizing by shape and resizing by scale are supported.
1758    *
1759    * Inputs (resizing by shape):
1760    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1761    *      the input.
1762    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1763    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1764    *      width of the output tensor.
1765    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1766    *      height of the output tensor.
1767    * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1768    *      Set to true to specify NCHW data layout for input0 and output0.
1769    *      Available since NNAPI feature level 3.
1770    * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1771    *      scalar, default to false.  If True, the centers of the 4 corner
1772    *      pixels of the input and output tensors are aligned, preserving the
1773    *      values at the corner pixels.
1774    *      Available since NNAPI feature level 4.
1775    * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1776    *      scalar, default to false. If True, the pixel centers are assumed to
1777    *      be at (0.5, 0.5). This is the default behavior of image.resize in
1778    *      TF 2.0. If this parameter is True, then align_corners parameter
1779    *      must be False.
1780    *      Available since NNAPI feature level 4.
1781    *
1782    * Inputs (resizing by scale, since NNAPI feature level 3):
1783    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1784    *      the input. Zero batches is supported for this tensor.
1785    * * 1: A scalar, specifying width_scale, the scaling factor of the width
1786    *      dimension from the input tensor to the output tensor. The output
1787    *      width is calculated as new_width = floor(width * width_scale).
1788    *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1789    *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1790    *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1791    * * 2: A scalar, specifying height_scale, the scaling factor of the height
1792    *      dimension from the input tensor to the output tensor. The output
1793    *      height is calculated as new_height = floor(height * height_scale).
1794    *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1795    *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1796    *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1797    * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1798    *      Set to true to specify NCHW data layout for input0 and output0.
1799    * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1800    *      scalar, default to false.  If True, the centers of the 4 corner
1801    *      pixels of the input and output tensors are aligned, preserving the
1802    *      values at the corner pixels.
1803    *      Available since NNAPI feature level 4.
1804    * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1805    *      scalar, default to false. If True, the pixel centers are assumed to
1806    *      be at (0.5, 0.5). This is the default behavior of image.resize in
1807    *      TF 2.0. If this parameter is True, then align_corners parameter
1808    *      must be False.
1809    *      Available since NNAPI feature level 4.
1810    *
1811    * Outputs:
1812    * * 0: The output 4-D tensor, of shape
1813    *      [batches, new_height, new_width, depth].
1814    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1815    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1816    *      the scale and zeroPoint must be the same as input0.
1819    *
1820    * Available since NNAPI feature level 1.
1821    */
1822   ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1823 
1824   /**
1825    * A basic recurrent neural network layer.
1826    *
1827    * This layer implements the operation:
1828    * outputs = state = activation(inputs * input_weights +
1829    *                              state * recurrent_weights + bias)
1830    *
1831    * Where:
1832    * * “input_weights” is a weight matrix that multiplies the inputs;
1833    * * “recurrent_weights” is a weight matrix that multiplies the current
1834    *    “state” which itself is the output from the previous time step
1835    *    computation;
1836    * * “bias” is a bias vector (added to each output vector in the batch);
1837    * * “activation” is the function passed as the “fused_activation_function”
1838    *   argument (if not “NONE”).
1839    *
1840    * Supported tensor {@link OperandCode}:
1841    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1842    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1843    *
1844    * The input tensors must all be the same type.
1845    *
1846    * Inputs:
1847    * * 0: input.
1848    *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1849    *      corresponds to the batching dimension, and “input_size” is the size
1850    *      of the input.
1851    * * 1: weights.
1852    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1853    *      corresponds to the number of units.
1854    * * 2: recurrent_weights.
1855    *      A 2-D tensor of shape [num_units, num_units], with columns
1856    *      corresponding to the weights from each unit.
1857    * * 3: bias.
1858    *      A 1-D tensor of shape [num_units].
1859    * * 4: hidden state (in).
1860    *      A 2-D tensor of shape [batch_size, num_units].
1861    * * 5: fused_activation_function.
1862    *      An optional {@link FuseCode} value indicating the
1863    *      activation function. If “NONE” is specified then it results in a
1864    *      linear activation.
1865    *
1866    * Outputs:
1867    * * 0: hidden state (out).
1868    *      A 2-D tensor of shape [batch_size, num_units].
1869    *
1870    * * 1: output.
1871    *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1872    *      the same as the current state value.
1873    *
1874    * Available since NNAPI feature level 1.
1875    */
1876   ANEURALNETWORKS_RNN = 24,
1877 
1878   /**
1879    * Computes the softmax activation on the input tensor element-wise, per
1880    * batch, by normalizing the input vector so the maximum coefficient is
1881    * zero.
1882    *
1883    * The output is calculated using this formula:
1884    *
1885    *     output[batch, i] =
1886    *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1887    *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1888    *
1889    * For input tensor with rank other than 2, the activation will be applied
1890    * independently on each 1-D slice along specified dimension.
1891    *
1892    * Supported tensor {@link OperandCode}:
1893    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1894    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1895    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1896    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1897    *
1898    * Supported tensor rank: up to 4.
1899    * Tensors with rank other than 2 or 4 are only supported since NNAPI feature level 3.
1900    *
1901    * Inputs:
1902    * * 0: A 2-D or 4-D tensor, specifying the input tensor.
1903    *      Since NNAPI feature level 3, this tensor may be zero-sized.
1904    * * 1: A scalar, specifying the positive scaling factor for the exponent,
1905    *      beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1906    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
1907    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1908    *      must be of {@link ANEURALNETWORKS_FLOAT32}.
1909    *      If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the
1910    *      scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
1911    * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1912    *      specifying the dimension the activation would be performed on.
1913    *      Negative index is used to specify axis from the end (e.g. -1 for
1914    *      the last axis). Must be in the range [-n, n).
1915    *      Available since NNAPI feature level 3.
1916    *
1917    * Outputs:
1918    * * 0: The output tensor of same shape as input0.
1919    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1920    *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1921    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1922    *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1923    *
1924    * Available since NNAPI feature level 1.
1925    */
1926   ANEURALNETWORKS_SOFTMAX = 25,
1927 
1928   /**
1929    * Rearranges blocks of spatial data, into depth.
1930    *
1931    * More specifically, this op outputs a copy of the input tensor where
1932    * values from the height and width dimensions are moved to the depth
1933    * dimension. The value block_size indicates the input block size and how
1934    * the data is moved.
1935    *
1936    * Chunks of data of size block_size * block_size from depth are rearranged
1937    * into non-overlapping blocks of size block_size x block_size.
1938    *
1939    * The depth of the output tensor is input_depth * block_size * block_size.
1940    * The input tensor's height and width must be divisible by block_size.
1941    *
1942    * Supported tensor {@link OperandCode}:
1943    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1944    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1945    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1946    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1947    *
1948    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1949    * With the default data layout NHWC, the data is stored in the order of:
1950    * [batch, height, width, channels]. Alternatively, the data layout could
1951    * be NCHW, the data storage order of: [batch, channels, height, width].
1952    * NCHW is supported since NNAPI feature level 3.
1953    *
1954    * Inputs:
1955    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1956    *      specifying the input.
1957    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1958    *      block_size must be >=1 and block_size must be a divisor of both the
1959    *      input height and width.
1960    * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1961    *      Set to true to specify NCHW data layout for input0 and output0.
1962    *      Available since NNAPI feature level 3.
1963    *
1964    * Outputs:
1965    * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1966    *      width/block_size, depth_in*block_size*block_size].
1967    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1968    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1969    *      the scale and zeroPoint must be the same as input0.
1970    *
1971    * Available since NNAPI feature level 1.
1972    */
1973   ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
1974 
1975   /**
1976    * SVDF op is a kind of stateful layer derived from the notion that a
1977    * densely connected layer that's processing a sequence of input frames can
1978    * be approximated by using a singular value decomposition of each of its
1979    * nodes. The implementation is based on:
1980    *
1981    * https://research.google.com/pubs/archive/43813.pdf
1982    *
1983    * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1984    * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1985    * INTERSPEECH, 2015.
1986    *
1987    * It processes the incoming input using a 2-stage filtering mechanism:
1988    * * stage 1 performs filtering on the "features" dimension, whose outputs
1989    *   get pushed into a memory of fixed-size memory_size.
1990    * * stage 2 performs filtering on the "time" dimension of the memory_size
1991    *   memoized outputs of stage 1.
1992    *
1993    * Specifically, for rank 1, this layer implements the operation:
1994    *
1995    *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1996    *                          "ANEURALNETWORKS_PADDING_VALID"));
1997    *     outputs = activation(memory * weights_time + bias);
1998    *
1999    * Where:
2000    * * “weights_feature” is a weights matrix that processes the inputs (by
2001    *   convolving the input with every “feature filter”), and whose outputs
2002    *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
2003    *   entry gets dropped);
2004    * * “weights_time” is a weights matrix that processes the “memory” (by a
2005    *   batched matrix multiplication on the num_units);
2006    * * “bias” is an optional bias vector (added to each output vector in the
2007    *   batch); and
2008    * * “activation” is the function passed as the “fused_activation_function”
2009    *   argument (if not “NONE”).
2010    *
2011    * Each rank adds a dimension to the weights matrices by means of stacking
2012    * the filters.
2013    *
2014    * Supported tensor {@link OperandCode}:
2015    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2016    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2017    *
2018    * All input tensors must be the same type.
2019    *
2020    * Inputs:
2021    * * 0: input.
2022    *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2023    *      corresponds to the batching dimension, and “input_size” is the size
2024    *      of the input.
2025    * * 1: weights_feature.
2026    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2027    *      corresponds to the number of units.
2028    * * 2: weights_time.
2029    *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
2030    *      corresponds to the fixed-size of the memory.
2031    * * 3: bias.
2032    *      An optional 1-D tensor of shape [num_units].
2033    * * 4: state (in).
2034    *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
2035    * * 5: rank.
2036    *      The rank of the SVD approximation.
2037    * * 6: fused_activation_function.
2038    *      An optional {@link FuseCode} value indicating the
2039    *      activation function. If “NONE” is specified then it results in a
2040    *      linear activation.
2041    *
2042    * Outputs:
2043    * * 0: state (out).
2044    *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2045    *      [batch_size, (memory_size - 1) * num_units * rank].
2046    * * 1: output.
2047    *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2048    *      [batch_size, num_units].
2049    *
2050    * Available since NNAPI feature level 1.
2051    */
2052   ANEURALNETWORKS_SVDF = 27,
2053 
2054   /**
2055    * Computes hyperbolic tangent of input tensor element-wise.
2056    *
2057    * The output is calculated using this formula:
2058    *
2059    *     output = tanh(input)
2060    *
2061    * Supported tensor {@link OperandCode}:
2062    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2063    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2064    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2065    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2066    *
2067    * Supported tensor rank: up to 4.
2068    *
2069    * Inputs:
2070    * * 0: A tensor, specifying the input.
2071    *      Since NNAPI feature level 3, this tensor may be zero-sized.
2072    *
2073    * Outputs:
2074    * * 0: The output tensor of same shape as input0.
2075    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
2076    *      the scale must be 1.f / 128 and the zeroPoint must be 128.
2077    *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
2078    *      the scale must be 1.f / 128 and the zeroPoint must be 0.
2079    *
2080    * Available since NNAPI feature level 1.
2081    */
2082   ANEURALNETWORKS_TANH = 28,
2083 
2084   // Operations below are available since NNAPI feature level 2.
2085 
2086   /**
2087    * BatchToSpace for N-dimensional tensors.
2088    *
2089    * This operation reshapes the batch dimension (dimension 0) into M + 1
2090    * dimensions of shape block_shape + [batch], interleaves these blocks back
2091    * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
2092    * result with the same rank as the input.
2093    *
2094    * This is the reverse of SpaceToBatch.
2095    *
2096    * Supported tensor {@link OperandCode}:
2097    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2098    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2099    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2100    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2101    *
2102    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2103    * With the default data layout NHWC, the data is stored in the order of:
2104    * [batch, height, width, channels]. Alternatively, the data layout could
2105    * be NCHW, the data storage order of: [batch, channels, height, width].
2106    * NCHW is supported since NNAPI feature level 3.
2107    *
2108    * Inputs:
2109    * * 0: An n-D tensor, specifying the tensor to be reshaped
2110    * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2111    *      sizes for each spatial dimension of the input tensor. All values
2112    *      must be >= 1.
2113    * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2114    *      Set to true to specify NCHW data layout for input0 and output0.
2115    *      Available since NNAPI feature level 3.
2116    *
2117    * Outputs:
2118    * * 0: A tensor of the same {@link OperandCode} as input0.
2119    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2120    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2121    *      the scale and zeroPoint must be the same as input0.
2122    *
2123    * Available since NNAPI feature level 2.
2124    */
2125   ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
2126 
2127   /**
2128    * Element-wise division of two tensors.
2129    *
2130    * Takes two input tensors of identical {@link OperandCode} and compatible
2131    * dimensions. The output is the result of dividing the first input tensor
2132    * by the second, optionally modified by an activation function.
2133    *
2134    * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs
2135    * "floor division" ("//" in Python). For example,
2136    *     5 // 2 = 2
2137    *    -5 // 2 = -3
2138    *
2139    * Two dimensions are compatible when:
2140    *     1. they are equal, or
2141    *     2. one of them is 1
2142    *
2143    * The size of the output is the maximum size along each dimension of the
2144    * input operands. It starts with the trailing dimensions, and works its way
2145    * forward.
2146    *
2147    * Example:
2148    *     input1.dimension =    {4, 1, 2}
2149    *     input2.dimension = {5, 4, 3, 1}
2150    *     output.dimension = {5, 4, 3, 2}
2151    *
2152    * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2153    * dimension is only compatible with 0 or 1. The size of the output
2154    * dimension is zero if either of corresponding input dimension is zero.
2155    *
2156    * Supported tensor {@link OperandCode}:
2157    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2158    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2159    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2160    *
2161    * Supported tensor rank: up to 4
2162    *
2163    * Inputs:
2164    * * 0: An n-D tensor, specifying the first input.
2165    * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2166    *      as input0.
2167    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2168    *      {@link FuseCode} values. Specifies the activation to
2169    *      invoke on the result.
2170    *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2171    *      the {@link FuseCode} must be "NONE".
2172    *
2173    * Outputs:
2174    * * 0: A tensor of the same {@link OperandCode} as input0.
2175    *
2176    * Available since NNAPI feature level 2.
2177    */
2178   ANEURALNETWORKS_DIV = 30,
2179 
2180   /**
2181    * Computes the mean of elements across dimensions of a tensor.
2182    *
2183    * Reduces the input tensor along the given dimensions to reduce. Unless
2184    * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
2185    * in axis. If keep_dims is true, the reduced dimensions are retained with
2186    * length 1.
2187    *
2188    * Supported tensor {@link OperandCode}:
2189    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2190    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2191    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2192    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2193    *
2194    * Supported tensor rank: up to 4
2195    *
2196    * Inputs:
2197    * * 0: A tensor, specifying the input.
2198    * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
2199    *      to reduce. Must be in the range
2200    *      [-rank(input_tensor), rank(input_tensor)).
2201    *
2202    *      NOTE: When the operation was introduced, the documentation
2203    *      incorrectly stated that if dimensions were empty, the operation
2204    *      would reduce across all dimensions. This behavior was never
2205    *      implemented.
2206    *
2207    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
2208    *      retains reduced dimensions with length 1.
2209    *
2210    * Outputs:
2211    * * 0: A tensor of the same {@link OperandCode} as input0.
2212    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2213    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2214    *      the scale and zeroPoint must be the same as input0.
2215    *      If all dimensions are reduced and keep_dims is false, the output
2216    *      shape is [1].
2217    *
2218    * Available since NNAPI feature level 2.
2219    */
2220   ANEURALNETWORKS_MEAN = 31,
2221 
2222   /**
2223    * Pads a tensor.
2224    *
2225    * This operation pads a tensor according to the specified paddings.
2226    *
2227    * Supported tensor {@link OperandCode}:
2228    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2229    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2230    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2231    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2232    *   (full support since NNAPI feature level 3, see the output section)
2233    *
2234    * Supported tensor rank: up to 4
2235    *
2236    * Inputs:
2237    * * 0: An n-D tensor, specifying the tensor to be padded.
2238    * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2239    *      for each spatial dimension of the input tensor. The shape of the
2240    *      tensor must be {rank(input0), 2}.
2241    *      padding[i, 0] specifies the number of elements to be padded in the
2242    *      front of dimension i.
2243    *      padding[i, 1] specifies the number of elements to be padded after the
2244    *      end of dimension i.
2245    *
2246    * Outputs:
2247    * * 0: A tensor of the same {@link OperandCode} as input0. The
2248    *      output tensor has the same rank as input0, and each
2249    *      dimension of the output tensor has the same size as the
2250    *      corresponding dimension of the input tensor plus the size
2251    *      of the padding:
2252    *          output0.dimension[i] =
2253    *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
2254    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2255    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2256    *      the scale and zeroPoint must be the same as input0.
2257    *
2258    *      NOTE: Before NNAPI feature level 3, the pad value for
2259    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2260    *      Since NNAPI feature level 3, the pad value is always the logical zero.
2261    *
2262    * Available since NNAPI feature level 2.
2263    */
2264   ANEURALNETWORKS_PAD = 32,
2265 
2266   /**
2267    * SpaceToBatch for N-Dimensional tensors.
2268    *
2269    * This operation divides "spatial" dimensions [1, ..., M] of the input into
2270    * a grid of blocks of shape block_shape, and interleaves these blocks with
2271    * the "batch" dimension (0) such that in the output, the spatial dimensions
2272    * [1, ..., M] correspond to the position within the grid, and the batch
2273    * dimension combines both the position within a spatial block and the
2274    * original batch position. Prior to division into blocks, the spatial
2275    * dimensions of the input are optionally zero padded according to paddings.
2276    *
2277    * Supported tensor {@link OperandCode}:
2278    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2279    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2280    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2281    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2282    *   (full support since NNAPI feature level 3, see the output section)
2283    *
2284    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2285    * With the default data layout NHWC, the data is stored in the order of:
2286    * [batch, height, width, channels]. Alternatively, the data layout could
2287    * be NCHW, the data storage order of: [batch, channels, height, width].
2288    * NCHW is supported since NNAPI feature level 3.
2289    *
2290    * Inputs:
2291    * * 0: An n-D tensor, specifying the input.
2292    * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2293    *      sizes for each spatial dimension of the input tensor. All values
2294    *      must be >= 1.
2295    * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2296    *      for each spatial dimension of the input tensor. All values must be
2297    *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2298    *      of spatial dimensions.
2299    *      padding[i, 0] specifies the number of element to be padded in the
2300    *      front of dimension i.
2301    *      padding[i, 1] specifies the number of element to be padded after the
2302    *      end of dimension i.
2303    * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2304    *      Set to true to specify NCHW data layout for input0 and output0.
2305    *      Available since NNAPI feature level 3.
2306    *
2307    * Outputs:
2308    * * 0: A tensor of the same {@link OperandCode} as input0.
2309    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2310    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2311    *      the scale and zeroPoint must be the same as input0.
2312    *
2313    *      NOTE: Before NNAPI feature level 3, the pad value for
2314    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2315    *      Since NNAPI feature level 3, the pad value is always the logical zero.
2316    *
2317    * Available since NNAPI feature level 2.
2318    */
2319   ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
2320 
2321   /**
2322    * Removes dimensions of size 1 from the shape of a tensor.
2323    *
2324    * Given a tensor input, this operation returns a tensor of the same
2325    * {@link OperandCode} with all dimensions of size 1 removed. If you don't
2326    * want to remove all size 1 dimensions, you can remove specific size 1
2327    * dimensions by specifying the axes (input1).
2328    *
2329    * Supported tensor {@link OperandCode}:
2330    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2331    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2332    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2333    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2334    *
2335    * Supported tensor rank: up to 4
2336    *
2337    * Inputs:
2338    * * 0: An n-D tensor, the tensor to be squeezed.
2339    * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2340    *      dimensions to squeeze. If specified only squeezes the dimensions
2341    *      listed. Otherwise, squeezes all dimensions. The dimension index
2342    *      starts at 0. An error must be reported if squeezing a dimension that
2343    *      is not 1.
2344    *
2345    * Outputs:
2346    * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
2347    *      same data as input, but has one or more dimensions of size 1
2348    *      removed.
2349    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2350    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2351    *      the scale and zeroPoint must be the same as input0.
2352    *      If all input dimensions are equal to 1 and are to be squeezed, the
2353    *      output shape is [1].
2354    *
2355    * Available since NNAPI feature level 2.
2356    */
2357   ANEURALNETWORKS_SQUEEZE = 34,
2358 
2359   /**
2360    * Extracts a strided slice of a tensor.
2361    *
2362    * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2363    * from the given input tensor. Starting at the location specified by begin
2364    * the slice continues by adding stride to the index until all dimensions
2365    * are not less than end. Note that a stride can be negative, which causes a
2366    * reverse slice.
2367    *
2368    * Supported tensor {@link OperandCode}:
2369    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2370    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2371    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2372    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2373    *
2374    * Supported tensor rank: up to 4
2375    *
2376    * Inputs:
2377    * * 0: An n-D tensor, specifying the tensor to be sliced.
2378    * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2379    *      starts of the dimensions of the input tensor to be sliced. The
2380    *      length must be of rank(input0).
2381    * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2382    *      ends of the dimensions of the input tensor to be sliced. The length
2383    *      must be of rank(input0).
2384    * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2385    *      strides of the dimensions of the input tensor to be sliced. The
2386    *      length must be of rank(input0). The entries must be non-zero.
2387    * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit
2388    *      of begin_mask is set, begin[i] is ignored and the fullest possible
2389    *      range in that dimension is used instead.
2390    * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of
2391    *      end_mask is set, end[i] is ignored and the fullest possible range in
2392    *      that dimension is used instead.
2393    * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the
2394    *      ith bit of shrink_axis_mask is set, the ith dimension specification
2395    *      shrinks the dimensionality by 1, taking on the value at index
2396    *      begin[i]. In this case, the ith specification must define a
2397    *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2398    *
2399    * Outputs:
2400    * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),
2401    *      where k is the number of bits set in shrink_axis_mask.
2402    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2403    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2404    *      the scale and zeroPoint must be the same as input0.
2405    *      If shrink_axis_mask is true for all input dimensions, the output
2406    *      shape is [1].
2407    *
2408    * Available since NNAPI feature level 2.
2409    */
2410   ANEURALNETWORKS_STRIDED_SLICE = 35,
2411 
2412   /**
2413    * Element-wise subtraction of two tensors.
2414    *
2415    * Takes two input tensors of identical {@link OperandCode} and compatible
2416    * dimensions. The output is the result of subtracting the second input
2417    * tensor from the first one, optionally modified by an activation function.
2418    *
2419    * Two dimensions are compatible when:
2420    *     1. they are equal, or
2421    *     2. one of them is 1
2422    *
2423    * The size of the output is the maximum size along each dimension of the
2424    * input operands. It starts with the trailing dimensions, and works its way
2425    * forward.
2426    *
2427    * Example:
2428    *     input1.dimension =    {4, 1, 2}
2429    *     input2.dimension = {5, 4, 3, 1}
2430    *     output.dimension = {5, 4, 3, 2}
2431    *
2432    * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2433    * dimension is only compatible with 0 or 1. The size of the output
2434    * dimension is zero if either of corresponding input dimension is zero.
2435    *
2436    * Supported tensor {@link OperandCode}:
2437    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2438    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2439    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2440    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2441    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2442    *
2443    * Supported tensor rank: up to 4
2444    *
2445    * Inputs:
2446    * * 0: An n-D tensor, specifying the first input.
2447    * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2448    *      as input0.
2449    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2450    *      {@link FuseCode} values. Specifies the activation to
2451    *      invoke on the result.
2452    *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2453    *      the {@link FuseCode} must be "NONE".
2454    *
2455    * Outputs:
2456    * * 0: A tensor of the same {@link OperandCode} as input0.
2457    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2458    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2459    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2460    *
2461    * Available since NNAPI feature level 2.
2462    */
2463   ANEURALNETWORKS_SUB = 36,
2464 
2465   /**
2466    * Transposes the input tensor, permuting the dimensions according to the
2467    * perm tensor.
2468    *
2469    * The returned tensor's dimension i corresponds to the input dimension
2470    * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2471    * rank of the input tensor. Hence by default, this operation performs a
2472    * regular matrix transpose on 2-D input Tensors.
2473    *
2474    * Supported tensor {@link OperandCode}:
2475    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2476    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2477    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2478    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2479    *
2480    * Supported tensor rank: up to 4
2481    *
2482    * Inputs:
2483    * * 0: An n-D tensor, specifying the tensor to be transposed.
2484    *      Since NNAPI feature level 3, this tensor may be zero-sized.
2485    * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
2486    *      the permutation of the dimensions of the input tensor.
2487    *
2488    * Outputs:
2489    * * 0: A tensor of the same {@link OperandCode} as input0.
2490    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2491    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2492    *      the scale and zeroPoint must be the same as input0.
2493    *
2494    * Available since NNAPI feature level 2.
2495    */
2496   ANEURALNETWORKS_TRANSPOSE = 37,
2497 
2498   // Operations below are available since NNAPI feature level 3.
2499 
2500   /**
2501    * Computes the absolute value of a tensor, element-wise.
2502    *
2503    * Supported tensor {@link OperandCode}:
2504    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2505    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2506    * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2507    *
2508    * Supported tensor rank: from 1.
2509    *
2510    * Inputs:
2511    * * 0: A tensor.
2512    *
2513    * Outputs:
2514    * * 0: The output tensor of same shape as input0.
2515    *
2516    * Available since NNAPI feature level 3.
2517    */
2518   ANEURALNETWORKS_ABS = 38,
2519 
2520   /**
2521    * Returns the index of the largest element along an axis.
2522    *
2523    * Supported tensor {@link OperandCode}:
2524    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2525    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2526    * * {@link ANEURALNETWORKS_TENSOR_INT32}
2527    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2528    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2529    *
2530    * Supported tensor rank: from 1
2531    *
2532    * Inputs:
2533    * * 0: An n-D tensor specifying the input. Must be non-empty.
2534    * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2535    *      reduce across. Negative index is used to specify axis from the
2536    *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2537    *
2538    * Outputs:
2539    * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2540    *      If input is 1-dimensional, the output shape is [1].
2541    *
2542    * Available since NNAPI feature level 3.
2543    */
2544   // There is no underscore in ARG_MAX to avoid name conflict with
2545   // the macro defined in libc/kernel/uapi/linux/limits.h.
2546   ANEURALNETWORKS_ARGMAX = 39,
2547 
2548   /**
2549    * Returns the index of the smallest element along an axis.
2550    *
2551    * Supported tensor {@link OperandCode}:
2552    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2553    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2554    * * {@link ANEURALNETWORKS_TENSOR_INT32}
2555    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2556    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2557    *
2558    * Supported tensor rank: from 1
2559    *
2560    * Inputs:
2561    * * 0: An n-D tensor specifying the input. Must be non-empty.
2562    * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2563    *      reduce across. Negative index is used to specify axis from the
2564    *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2565    *
2566    * Outputs:
2567    * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2568    *      If input is 1-dimensional, the output shape is [1].
2569    *
2570    * Available since NNAPI feature level 3.
2571    */
2572   ANEURALNETWORKS_ARGMIN = 40,  // See ARGMAX for naming discussion.
2573 
2574   /**
2575    * Transform axis-aligned bounding box proposals using bounding box deltas.
2576    *
2577    * Given the positions of bounding box proposals and the corresponding
2578    * bounding box deltas for each class, return the refined bounding box
2579    * regions. The resulting bounding boxes are clipped against the edges of
2580    * the image.
2581    *
2582    * Supported tensor {@link OperandCode}:
2583    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2584    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2585    * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
2586    *
2587    * Inputs:
2588    * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2589    *      bounding box proposals, each line with format [x1, y1, x2, y2].
2590    *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
2591    *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2592    *      is supported for this tensor.
2593    * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2594    *      bounding box delta for each region of interest and each class. The
2595    *      bounding box deltas are organized in the following order
2596    *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2597    *      for the center position of the bounding box with respect to the width
2598    *      and height, dw and dh is the log-scale relative correction factor
2599    *      for the width and height. For input0 of type
2600    *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
2601    *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
2602    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2603    *      supported for this tensor.
2604    * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
2605    *      [num_rois], specifying the batch index of each box. Boxes with
2606    *      the same batch index are grouped together. Zero num_rois is
2607    *      supported for this tensor.
2608    * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2609    *      each image in the batch, each line with format
2610    *      [image_height, image_width].
2611    *
2612    * Outputs:
2613    * * 0: A tensor of the same {@link OperandCode} as input0, with shape
2614    *      [num_rois, num_classes * 4], specifying the coordinates of each
2615    *      output bounding box for each class, with format [x1, y1, x2, y2].
2616    *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
2617    *      scale must be 0.125 and the zero point must be 0.
2618    *
2619    * Available since NNAPI feature level 3.
2620    */
2621   ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2622 
2623   /**
2624    * A recurrent neural network layer that applies an LSTM cell to a
2625    * sequence of inputs in forward and backward directions.
2626    *
2627    * The op supports cross-linking via an auxiliary input. Regular cell feeds
2628    * one input into the two RNN cells in the following way:
2629    *
2630    *       INPUT  (INPUT_REVERSED)
2631    *         |         |
2632    *    ---------------------
2633    *    | FW_LSTM   BW_LSTM |
2634    *    ---------------------
2635    *         |         |
2636    *      FW_OUT     BW_OUT
2637    *
2638    * An op with cross-linking takes two inputs and feeds them into the RNN
2639    * cells in the following way:
2640    *
2641    *       AUX_INPUT   (AUX_INPUT_REVERSED)
2642    *           |             |
2643    *     INPUT | (INPUT_R'D.)|
2644    *       |   |       |     |
2645    *    -----------------------
2646    *    |  \  /        \    / |
2647    *    | FW_LSTM     BW_LSTM |
2648    *    -----------------------
2649    *         |           |
2650    *      FW_OUT      BW_OUT
2651    *
2652    * The cross-linking mode is enabled iff auxiliary input and auxiliary
2653    * weights are present. While stacking this op on top of itself, this
2654    * allows to connect both forward and backward outputs from previous cell
2655    * to the next cell's input.
2656    *
2657    * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2658    * enabled if auxiliary input is present but auxiliary weights are omitted.
2659    * In this case, the cell feeds inputs into the RNN in the following way:
2660    *
2661    *       INPUT (AUX_INPUT_REVERSED)
2662    *         |         |
2663    *    ---------------------
2664    *    | FW_LSTM   BW_LSTM |
2665    *    ---------------------
2666    *         |         |
2667    *      FW_OUT     BW_OUT
2668    *
2669    * While stacking this op on top of itself, this allows to connect both
2670    * forward and backward outputs from previous cell to the next cell's
2671    * corresponding inputs.
2672    *
2673    * Supported tensor {@link OperandCode}:
2674    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2675    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2676    *
2677    * Supported tensor rank: 3, either time-major or batch-major.
2678    *
2679    * All input and output tensors must be of the same type.
2680    *
2681    * Inputs:
2682    * * 0: The input.
2683    *      A 3-D tensor of shape:
2684    *        If time-major: [max_time, batch_size, input_size]
2685    *        If batch-major: [batch_size, max_time, input_size]
2686    *      where "max_time" is the number of timesteps (sequence length),
2687    *      "batch_size" corresponds to the batching dimension, and
2688    *      "input_size" is the size of the input.
2689    * * 1: The forward input-to-input weights. Optional.
2690    *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2691    *      corresponds to the number of forward cell units.
2692    * * 2: The forward input-to-forget weights.
2693    *      A 2-D tensor of shape [fw_num_units, input_size].
2694    * * 3: The forward input-to-cell weights.
2695    *      A 2-D tensor of shape [fw_num_units, input_size].
2696    * * 4: The forward input-to-output weights.
2697    *      A 2-D tensor of shape [fw_num_units, input_size].
2698    * * 5: The forward recurrent-to-input weights. Optional.
2699    *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2700    *      corresponds to either the number of cell units (i.e., fw_num_units),
2701    *      or the second dimension of the “fw_projection_weights”, if defined.
2702    * * 6: The forward recurrent-to-forget weights.
2703    *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2704    * * 7: The forward recurrent-to-cell weights.
2705    *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2706    * * 8: The forward recurrent-to-output weights.
2707    *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2708    * * 9: The forward cell-to-input weights. Optional.
2709    *      A 1-D tensor of shape [fw_num_units].
2710    * * 10: The forward cell-to-forget weights. Optional.
2711    *       A 1-D tensor of shape [fw_num_units].
2712    * * 11: The forward cell-to-output weights. Optional.
2713    *       A 1-D tensor of shape [fw_num_units].
2714    * * 12: The forward input gate bias. Optional.
2715    *       A 1-D tensor of shape [fw_num_units].
2716    * * 13: The forward forget gate bias.
2717    *       A 1-D tensor of shape [fw_num_units].
2718    * * 14: The forward cell gate bias.
2719    *       A 1-D tensor of shape [fw_num_units].
2720    * * 15: The forward output gate bias.
2721    *       A 1-D tensor of shape [fw_num_units].
2722    * * 16: The forward projection weights. Optional.
2723    *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2724    * * 17: The forward projection bias. Optional.
2725    *       A 1-D tensor of shape [fw_output_size].
2726    * * 18: The backward input-to-input weights. Optional.
2727    *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2728    *       corresponds to the number of backward cell units.
2729    * * 19: The backward input-to-forget weights.
2730    *       A 2-D tensor of shape [bw_num_units, input_size].
2731    * * 20: The backward input-to-cell weights.
2732    *       A 2-D tensor of shape [bw_num_units, input_size].
2733    * * 21: The backward input-to-output weights.
2734    *       A 2-D tensor of shape [bw_num_units, input_size].
2735    * * 22: The backward recurrent-to-input weights. Optional.
2736    *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2737    *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2738    *       or the second dimension of the “bw_projection_weights”, if defined.
2739    * * 23: The backward recurrent-to-forget weights.
2740    *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2741    * * 24: The backward recurrent-to-cell weights.
2742    *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2743    * * 25: The backward recurrent-to-output weights.
2744    *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2745    * * 26: The backward cell-to-input weights. Optional.
2746    *       A 1-D tensor of shape [bw_num_units].
2747    * * 27: The backward cell-to-forget weights. Optional.
2748    *       A 1-D tensor of shape [bw_num_units].
2749    * * 28: The backward cell-to-output weights. Optional.
2750    *       A 1-D tensor of shape [bw_num_units].
2751    * * 29: The backward input gate bias. Optional.
2752    *       A 1-D tensor of shape [bw_num_units].
2753    * * 30: The backward forget gate bias.
2754    *       A 1-D tensor of shape [bw_num_units].
2755    * * 31: The backward cell gate bias.
2756    *       A 1-D tensor of shape [bw_num_units].
2757    * * 32: The backward output gate bias.
2758    *       A 1-D tensor of shape [bw_num_units].
2759    * * 33: The backward projection weights. Optional.
2760    *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2761    * * 34: The backward projection bias. Optional.
2762    *       A 1-D tensor of shape [bw_output_size].
2763    * * 35: The forward input activation state.
2764    *       A 2-D tensor of shape [batch_size, bw_output_size].
2765    * * 36: The forward input cell state.
2766    *       A 2-D tensor of shape [batch_size, bw_num_units].
2767    * * 37: The backward input activation state.
2768    *       A 2-D tensor of shape [batch_size, bw_output_size].
2769    * * 38: The backward input cell state.
2770    *       A 2-D tensor of shape [batch_size, bw_num_units].
2771    * * 39: The auxiliary input. Optional.
2772    *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2773    *       where “batch_size” corresponds to the batching dimension, and
2774    *       “aux_input_size” is the size of the auxiliary input. Optional. See
2775    *       the docs above for the usage modes explanation.
2776    * * 40: The forward auxiliary input-to-input weights.
2777    *       Optional. See the docs above for the usage modes explanation.
2778    *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2779    * * 41: The forward auxiliary input-to-forget weights.
2780    *       Optional. See the docs above for the usage modes explanation.
2781    *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2782    * * 42: The forward auxiliary input-to-cell weights.
2783    *       Optional. See the docs above for the usage modes explanation.
2784    *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2785    * * 43: The forward auxiliary input-to-output weights.
2786    *       Optional. See the docs above for the usage modes explanation.
2787    *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2788    * * 44: The backward auxiliary input-to-input weights.
2789    *       Optional. See the docs above for the usage modes explanation.
2790    *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2791    * * 45: The backward auxiliary input-to-forget weights.
2792    *       Optional. See the docs above for the usage modes explanation.
2793    *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2794    * * 46: The backward auxiliary input-to-cell weights.
2795    *       Optional. See the docs above for the usage modes explanation.
2796    *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2797    * * 47: The backward auxiliary input-to-output weights.
2798    *       Optional. See the docs above for the usage modes explanation.
2799    *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2800    * * 48: The activation function.
2801    *       A value indicating the activation function:
2802    *       <ul>
2803    *       <li>0: None;
2804    *       <li>1: Relu;
2805    *       <li>3: Relu6;
2806    *       <li>4: Tanh;
2807    *       <li>6: Sigmoid.
2808    *       </ul>
2809    * * 49: The clipping threshold for the cell state, such
2810    *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2811    *       then clipping is disabled.
2812    *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2813    *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2814    *       otherwise if all the input tensors have the type
2815    *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2816    *       of type {@link ANEURALNETWORKS_FLOAT16}.
2817    * * 50: The clipping threshold for the output from the
2818    *       projection layer, such that values are bound within
2819    *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2820    *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2821    *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2822    *       otherwise if all the input tensors have the type
2823    *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2824    *       of type {@link ANEURALNETWORKS_FLOAT16}.
2825    * * 51: merge_outputs
2826    *       An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
2827    *       from forward and backward cells should be merged.
2828    * * 52: time_major
2829    *       An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
2830    *       of input and output tensors.
2831    * * 53: The forward input layer normalization weights. Optional.
2832    *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2833    *       to activation at input gate.
2834    * * 54: The forward forget layer normalization weights. Optional.
2835    *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2836    *       to activation at forget gate.
2837    * * 55: The forward cell layer normalization weights. Optional.
2838    *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2839    *       to activation at cell gate.
2840    * * 56: The forward output layer normalization weights. Optional.
2841    *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2842    *       to activation at output gate.
2843    * * 57: The backward input layer normalization weights. Optional.
2844    *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2845    *       to activation at input gate.
2846    * * 58: The backward forget layer normalization weights. Optional.
2847    *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2848    *       to activation at forget gate.
2849    * * 59: The backward cell layer normalization weights. Optional.
2850    *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2851    *       to activation at cell gate.
2852    * * 60: The backward output layer normalization weights. Optional.
2853    *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2854    *       to activation at output gate.
2855    *
2856    * Outputs:
2857    * * 0: The forward output.
2858    *      A 3-D tensor of shape:
2859    *        If time-major and not merge_outputs:
2860    *          [max_time, batch_size, fw_output_size]
2861    *        If time-major and merge_outputs:
2862    *          [max_time, batch_size, fw_output_size + bw_output_size]
2863    *        If batch-major and not merge_outputs:
2864    *          [batch_size, max_time, fw_output_size]
2865    *        If batch-major and merge_outputs:
2866    *          [batch_size, max_time, fw_output_size + bw_output_size]
2867    * * 1: The backward output.  Unused if merge_outputs is true.
2868    *      A 3-D tensor of shape:
2869    *        If time-major: [max_time, batch_size, bw_output_size]
2870    *        If batch-major: [batch_size, max_time, bw_output_size]
2871    * * 2: The forward activation state output.
2872    *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2873    *      activation state from the last time step in the sequence. This
2874    *      output is optional and can be omitted. If this output is present
2875    *      then outputs 3-5 must be present as well.
2876    *      Available since NNAPI feature level 4.
2877    * * 3: The forward cell state output.
2878    *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2879    *      from the last time step in the sequence. This output is optional
2880    *      and can be omitted. If this output is present
2881    *      then outputs 2, 4, 5 must be present as well.
2882    *      Available since NNAPI feature level 4.
2883    * * 4: The backward activation state output.
2884    *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2885    *      activation state from the last time step in the sequence. This
2886    *      output is optional and can be omitted. If this output is present
2887    *      then outputs 2, 3, 5 must be present as well.
2888    *      Available since NNAPI feature level 4.
2889    * * 5: The backward cell state output.
2890    *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2891    *      from the last time step in the sequence. This output is optional
2892    *      and can be omitted. If this output is present
2893    *      then outputs 2-4 must be present as well.
2894    *      Available since NNAPI feature level 4.
2895    *
2896    * Available since NNAPI feature level 3.
2897    *
2898    * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
2899    * and NNAPI does not maintain internal states. This operator does not support the usage pattern
2900    * in which multiple cells are chained and state tensors are propagated.
2901    */
2902   ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2903 
2904   /**
2905    * A recurrent neural network layer that applies a basic RNN cell to a
2906    * sequence of inputs in forward and backward directions.
2907    *
2908    * This Op unrolls the input along the sequence dimension, and implements
2909    * the following operation for each element in the sequence s =
2910    * 1...sequence_length:
2911    *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2912    *          fw_state * fw_recurrent_weights’ + fw_bias)
2913    *
2914    * And for each element in sequence t = sequence_length : 1
2915    *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2916    *          bw_state * bw_recurrent_weights’ + bw_bias)
2917    *
2918    * Where:
2919    * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2920    * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2921    *    current “state” which itself is the output from the previous time step
2922    *    computation;
2923    * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2924    *    batch);
2925    * * “activation” is the function passed as the “fused_activation_function”
2926    *   argument (if not “NONE”).
2927    *
2928    * The op supports cross-linking via an auxiliary input. Regular cell feeds
2929    * one input into the two RNN cells in the following way:
2930    *
2931    *       INPUT  (INPUT_REVERSED)
2932    *         |         |
2933    *    ---------------------
2934    *    | FW_RNN     BW_RNN |
2935    *    ---------------------
2936    *         |         |
2937    *      FW_OUT     BW_OUT
2938    *
2939    * An op with cross-linking takes two inputs and feeds them into the RNN
2940    * cells in the following way:
2941    *
2942    *       AUX_INPUT   (AUX_INPUT_REVERSED)
2943    *           |             |
2944    *     INPUT | (INPUT_R'D.)|
2945    *       |   |       |     |
2946    *    -----------------------
2947    *    |  \  /        \    / |
2948    *    | FW_RNN       BW_RNN |
2949    *    -----------------------
2950    *         |           |
2951    *      FW_OUT      BW_OUT
2952    *
2953    * The cross-linking mode is enabled iff auxiliary input and auxiliary
2954    * weights are present. While stacking this op on top of itself, this
2955    * allows to connect both forward and backward outputs from previous cell
2956    * to the next cell's input.
2957    *
2958    * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2959    * enabled if auxiliary input is present but auxiliary weights are omitted.
2960    * In this case, the cell feeds inputs into the RNN in the following way:
2961    *
2962    *       INPUT (AUX_INPUT_REVERSED)
2963    *         |         |
2964    *    ---------------------
2965    *    | FW_RNN     BW_RNN |
2966    *    ---------------------
2967    *         |         |
2968    *      FW_OUT     BW_OUT
2969    *
2970    * While stacking this op on top of itself, this allows to connect both
2971    * forward and backward outputs from previous cell to the next cell's
2972    * corresponding inputs.
2973    *
2974    * Supported tensor {@link OperandCode}:
2975    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2976    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2977    *
2978    * The input tensors must all be the same type.
2979    *
2980    * Inputs:
2981    * * 0: input.
2982    *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2983    *      it is set to true, then the input has a shape [maxTime, batchSize,
2984    *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2985    *      inputSize].
2986    * * 1: fwWeights.
2987    *      A 2-D tensor of shape [fwNumUnits, inputSize].
2988    * * 2: fwRecurrentWeights.
2989    *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2990    * * 3: fwBias.
2991    *      A 1-D tensor of shape [fwNumUnits].
2992    * * 4: fwHiddenState.
2993    *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2994    *      state input for the first time step of the computation.
2995    * * 5: bwWeights.
2996    *      A 2-D tensor of shape [bwNumUnits, inputSize].
2997    * * 6: bwRecurrentWeights.
2998    *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2999    * * 7: bwBias.
3000    *      A 1-D tensor of shape [bwNumUnits].
3001    * * 8: bwHiddenState
3002    *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
3003    *      state input for the first time step of the computation.
3004    * * 9: auxInput.
3005    *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
3006    *      it is set to true, then the input has a shape [maxTime, batchSize,
3007    *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
3008    *      auxInputSize]. Can be omitted. See the docs above for the usage
3009    *      modes explanation.
3010    * * 10:fwAuxWeights.
3011    *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
3012    *      See the docs above for the usage modes explanation.
3013    * * 11:bwAuxWeights.
3014    *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
3015    *      See the docs above for the usage modes explanation.
3016    * * 12:fusedActivationFunction.
3017    *      A {@link FuseCode} value indicating the activation function. If
3018    *      “NONE” is specified then it results in a linear activation.
3019    * * 13:timeMajor
3020    *      An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
3021    *      of input and output tensors.
3022    * * 14:mergeOutputs
3023    *      An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
3024    *      from forward and backward cells are separate (if set to false) or
3025    *      concatenated (if set to true).
3026    * Outputs:
3027    * * 0: fwOutput.
3028    *      A 3-D tensor. The first two dimensions of the shape are defined by
3029    *      the input 6 (timeMajor) and the third dimension is defined by the
3030    *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
3031    *      two dimensions are [maxTime, batchSize], otherwise they are set to
3032    *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
3033    *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
3034    *      to fwNumUnits.
3035    * * 1: bwOutput.
3036    *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
3037    *      this tensor is not produced. The shape is defined by the input 6
3038    *      (timeMajor). If it is set to true, then the shape is set to
3039    *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
3040    *      [batchSize, maxTime, bwNumUnits].
3041    * * 2: The forward hidden state output.
3042    *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
3043    *      state from the last time step in the sequence. This output is
3044    *      optional and can be omitted. If this output is present then output
3045    *      3 must be present as well.
3046    *      Available since NNAPI feature level 4.
3047    * * 3: The backward hidden state output.
3048    *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
3049    *      state from the last time step in the sequence. This output is
3050    *      optional and can be omitted. If this output is present then output
3051    *      2 must be present as well.
3052    *      Available since NNAPI feature level 4.
3053    *
3054    * Available since NNAPI feature level 3.
3055    *
3056    * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
3057    * and NNAPI does not maintain internal states. This operator does not support the usage pattern
3058    * in which multiple cells are chained and state tensors are propagated.
3059    */
3060   ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
3061 
3062   /**
3063    * Greedily selects a subset of bounding boxes in descending order of score.
3064    *
3065    * This op applies NMS algorithm to each class. In each loop of execution,
3066    * the box with maximum score gets selected and removed from the pending set.
3067    * The scores of the rest of boxes are lowered according to the
3068    * intersection-over-union (IOU) overlapping with the previously selected
3069    * boxes and a specified NMS kernel method. Any boxes with score less
3070    * than a threshold are removed from the pending set.
3071    *
3072    * Three NMS kernels are supported:
3073    * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
3074    * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
3075    * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
3076    *
3077    * Axis-aligned bounding boxes are represented by its upper-left corner
3078    * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3079    * bounding box should satisfy x1 <= x2 and y1 <= y2.
3080    *
3081    * Supported tensor {@link OperandCode}:
3082    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3083    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3084    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3085    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3086    *
3087    * Inputs:
3088    * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
3089    *      of each bounding box proposal. The boxes are grouped by batches in the
3090    *      first dimension. Zero num_rois is supported for this tensor.
3091    * * 1: A 2-D Tensor specifying the bounding boxes of shape
3092    *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
3093    *      The boxes are grouped by batches in the first dimension. The sequential
3094    *      order of the boxes corresponds with input0. For input0 of type
3095    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
3096    *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
3097    *      scale of 0.125.
3098    *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3099    *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
3100    *      with zeroPoint of -128 and scale of 0.125.
3101    *      Zero num_rois is supported for this tensor.
3102    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3103    *      [num_rois], specifying the batch index of each box. Boxes with
3104    *      the same batch index are grouped together.
3105    * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
3106    *      with scores lower than the threshold are filtered before sending
3107    *      to the NMS algorithm.
3108    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3109    *      number of selected bounding boxes for each image. Set to a negative
3110    *      value for unlimited number of output bounding boxes.
3111    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
3112    *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
3113    * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3114    *      threshold in hard and linear NMS kernel. This field is ignored if
3115    *      gaussian kernel is selected.
3116    * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
3117    *      gaussian NMS kernel. This field is ignored if gaussian kernel is
3118    *      not selected.
3119    * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
3120    *      Boxes with scores lower than the threshold are dropped during the
3121    *      score updating phase in soft NMS.
3122    *
3123    * Outputs:
3124    * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
3125    *      [num_output_rois], specifying the score of each output box. The boxes
3126    *      are grouped by batches, but the sequential order in each batch is not
3127    *      guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3129    *      or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3130    *      the scale and zero point must be the same as input0.
3131    * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
3132    *      [num_output_rois, 4], specifying the coordinates of each
3133    *      output bounding box with the same format as input1. The sequential
3134    *      order of the boxes corresponds with output0. For type of
3135    *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
3136    *      0.125 and the zero point must be 0.
3137    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3138    *      [num_output_rois], specifying the class of each output box. The
3139    *      sequential order of the boxes corresponds with output0.
3140    * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3141    *      [num_output_rois], specifying the batch index of each box. Boxes
3142    *      with the same batch index are grouped together.
3143    *
3144    * Available since NNAPI feature level 3.
3145    */
3146   ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
3147 
3148   /**
3149    * Casts a tensor to a type.
3150    *
3151    * This operation ignores the scale and zeroPoint of quantized tensors,
3152    * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
3153    * as a tensor of uint8 values.
3154    *
3155    * Supported tensor {@link OperandCode}:
3156    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3157    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3158    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3159    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3160    * Since NNAPI feature level 4, casting tensors of the following
3161    * {@link OperandCode} to the same {@link OperandCode} is supported:
3162    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3163    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3164    * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
3165    * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
3166    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3167    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
3168    *
3169    * Supported tensor rank: from 1
3170    *
3171    * Inputs:
3172    * * 0: A tensor.
3173    *
3174    * Outputs:
3175    * * 0: A tensor with the same shape as input0.
3176    *
3177    * Available since NNAPI feature level 3.
3178    */
3179   ANEURALNETWORKS_CAST = 45,
3180 
3181   /**
3182    * Shuffle the channels of the input tensor.
3183    *
3184    * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
3185    * divides the channel dimension into num_groups groups, and reorganizes the
3186    * channels by grouping channels with the same index in each group.
3187    *
3188    * Along the channel dimension, the output is calculated using this formula:
3189    *
3190    *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
3191    *
3192    * where group_size = num_channels / num_groups
3193    *
3194    * The number of channels must be divisible by num_groups.
3195    *
3196    * Supported tensor {@link OperandCode}:
3197    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3198    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3199    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3200    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3201    *
3202    * Supported tensor rank: up to 4
3203    *
3204    * Inputs:
3205    * * 0: An n-D tensor, specifying the tensor to be shuffled.
3206    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3207    *      groups.
3208    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
3209    *      channel shuffle would be performed on. Negative index is used to
3210    *      specify axis from the end (e.g. -1 for the last axis). Must be in
3211    *      the range [-n, n).
3212    *
3213    * Outputs:
3214    * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3215    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3216    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3217    *      the scale and zeroPoint must be the same as input0.
3218    *
3219    * Available since NNAPI feature level 3.
3220    */
3221   ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
3222 
3223   /**
3224    * Apply postprocessing steps to bounding box detections.
3225    *
3226    * Bounding box detections are generated by applying transformation on a set
3227    * of predefined anchors with the bounding box deltas from bounding box
3228    * regression. A final step of hard NMS is applied to limit the number of
3229    * returned boxes.
3230    *
3231    * Supported tensor {@link OperandCode}:
3232    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3233    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3234    *
3235    * Inputs:
3236    * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
3237    *      the score of each anchor with each class. Class 0 for each
3238    *      [batches, num_anchors, 0] is background and will be ignored.
3239    * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
3240    *      the first four values in length_box_encoding specifying the bounding
3241    *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
3242    *      where dy and dx is the linear-scale relative correction factor for the
3243    *      center position of the bounding box with respect to the width and height,
3244    *      dh and dw is the log-scale relative correction factor for the width and
3245    *      height. All the entries in length_box_encoding beyond the first four
3246    *      values are ignored in this operation.
3247    * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3248    *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
3249    *      ctr_x are the center position of the box, and h and w are the height
3250    *      and the width.
3251    * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3252    *      factor for dy in bounding box deltas.
3253    * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3254    *      factor for dx in bounding box deltas.
3255    * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3256    *      factor for dh in bounding box deltas.
3257    * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3258    *      factor for dw in bounding box deltas.
3259    * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular
3260    *      multi-class NMS algorithm that does NMS separately for each class,
3261    *      set to false for a faster algorithm that only does one single NMS
3262    *      using the highest class score.
3263    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
3264    *      the maximum number of boxes for the output. Boxes with the lowest
3265    *      scores are discarded to meet the limit.
3266    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3267    *      set to false, specifying the maximum number of classes per detection.
3268    * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3269    *       set to true, specifying the maximum number of detections when
3270    *       applying NMS algorithm for each single class.
3271    * * 11: A scalar, score_threshold. Boxes with scores lower than the
3272    *       threshold are filtered before sending to the NMS algorithm. The
3273    *       scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3274    *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3275    *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3276    *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3277    * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
3278    *       must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3279    *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3280    *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3281    *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3282    * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
3283    *       background class in the list of label map for the output, set
3284    *       to false to not include the background. When the background
3285    *       class is included, it has label 0 and the output classes start
3286    *       at 1 in the label map, otherwise, the output classes start at 0.
3287    *
3288    * Outputs:
3289    * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
3290    *      [batches, max_num_detections], specifying the score of each output
3291    *      detections.
3292    * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
3293    *      coordinates of each output bounding box, with format
3294    *      [y1, x1, y2, x2].
3295    * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3296    *      [batches, max_num_detections], specifying the class label for each
3297    *      output detection.
3298    * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
3299    *      specifying the number of valid output detections for each batch.
3300    *
3301    * Available since NNAPI feature level 3.
3302    */
3303   ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
3304 
3305   /**
3306    * For input tensors x and y, computes x == y elementwise.
3307    *
3308    * Supported tensor {@link OperandCode}:
3309    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3310    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3311    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3312    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3313    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3314    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3315    *
3316    * Supported tensor rank: from 1
3317    *
3318    * This operation supports broadcasting.
3319    *
3320    * Inputs:
3321    * * 0: A tensor.
3322    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3323    *      with input0.
3324    *
3325    * Outputs:
3326    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3327    *
3328    * Available since NNAPI feature level 3.
3329    */
3330   ANEURALNETWORKS_EQUAL = 48,
3331 
3332   /**
3333    * Computes exponential of x element-wise.
3334    *
3335    * Supported tensor {@link OperandCode}:
3336    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3337    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3338    *
3339    * Supported tensor rank: from 1.
3340    *
3341    * Inputs:
3342    * * 0: A tensor.
3343    *
3344    * Outputs:
3345    * * 0: The output tensor of same shape as input0.
3346    *
3347    * Available since NNAPI feature level 3.
3348    */
3349   ANEURALNETWORKS_EXP = 49,
3350 
3351   /**
3352    * Inserts a dimension of 1 into a tensor's shape.
3353    *
3354    * Given a tensor input, this operation inserts a dimension of 1 at the
3355    * given dimension index of input's shape. The dimension index starts at
3356    * zero; if you specify a negative dimension index, it is counted backward
3357    * from the end.
3358    *
3359    * Supported tensor {@link OperandCode}:
3360    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3361    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3362    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3363    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3364    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3365    *
3366    * Supported tensor rank: from 1
3367    *
3368    * Inputs:
3369    * * 0: An n-D tensor.
3370    * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension
3371    *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3372    *
3373    * Outputs:
3374    * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as
3375    *      input0.
3376    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3377    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3378    *      the scale and zeroPoint must be the same as input0.
3379    *
3380    * Available since NNAPI feature level 3.
3381    */
3382   ANEURALNETWORKS_EXPAND_DIMS = 50,
3383 
3384   /**
3385    * Gathers values along an axis.
3386    *
3387    * Produces an output tensor with shape
3388    *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3389    * where:
3390    *     # Vector indices (output is rank(input0)).
3391    *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3392    *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3393    *
3394    *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3395    *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3396    *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3397    *
3398    * Supported tensor {@link OperandCode}:
3399    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3400    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3401    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3402    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3403    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3404    *
3405    * Supported tensor rank: from 1
3406    *
3407    * Inputs:
3408    * * 0: An n-D tensor from which to gather values.
3409    * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.
3410    *      Negative index is used to specify axis from the end
3411    *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3412    * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.
3413    *      The values must be in the bounds of the corresponding dimensions
3414    *      of input0.
3415    *
3416    * Outputs:
3417    * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.
3418    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3419    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3420    *      the scale and zeroPoint must be the same as input0.
3421    *
3422    * Available since NNAPI feature level 3.
3423    */
3424   ANEURALNETWORKS_GATHER = 51,
3425 
3426   /**
3427    * Generate axis-aligned bounding box proposals.
3428    *
3429    * Bounding box proposals are generated by applying transformation on a set
3430    * of predefined anchors with the bounding box deltas from bounding box
3431    * regression. A final step of hard NMS is applied to limit the number of
3432    * returned boxes.
3433    *
3434    * Axis-aligned bounding boxes are represented by its upper-left corner
3435    * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3436    * bounding box should satisfy x1 <= x2 and y1 <= y2.
3437    *
3438    * Supported tensor {@link OperandCode}:
3439    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3440    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3441    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3442    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3443    *
3444    * Inputs:
3445    * * 0: A 4-D Tensor specifying the score of each anchor at each
3446    *      location. With "NHWC" data layout, the tensor shape is
3447    *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3448    *      the tensor shape is [batches, num_anchors, height, width].
3449    * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3450    *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3451    *      With "NCHW" data layout, the tensor shape is
3452    *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3453    *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3454    *      relative correction factor for the center position of the bounding box
3455    *      with respect to the width and height, dw and dh is the log-scale
3456    *      relative correction factor for the width and height. The last
3457    *      dimensions is the channel dimension.
3458    * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3459    *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3460    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3461    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3462    *      {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
3463    * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3464    *      each image in the batch, with format [image_height, image_width].
3465    *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3466    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this
3467    *      tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
3468    *      scale of 0.125.
3469    * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3470    *      from the height of original image to the height of feature map.
3471    * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3472    *      from the width of original image to the width of feature map.
3473    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3474    *      number of boxes before going into the hard NMS algorithm. Boxes
3475    *      with the lowest scores are discarded to meet the limit. Set to
3476    *      a non-positive value for unlimited number.
3477    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3478    *      number of boxes returning from the hard NMS algorithm. Boxes
3479    *      with the lowest scores are discarded to meet the limit. Set to
3480    *      a non-positive value for unlimited number.
3481    * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3482    *      threshold for hard NMS.
3483    * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with
3484    *      height or width lower than the absolute threshold are filtered out.
3485    * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3486    *       NCHW data layout for input0 and input1. Set to false for NHWC.
3487    *
3488    * Outputs:
3489    * * 0: A tensor of the same {@link OperandCode} as input0, of shape
3490    *      [num_output_rois], specifying the score of each output box.
3491    *      The boxes are grouped by batches, but the sequential order in
3492    *      each batch is not guaranteed. For type of
3493    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3494    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3495    *      point must be the same as input0.
3496    * * 1: A tensor of the same {@link OperandCode} as input3, of shape
3497    *      [num_output_rois, 4], specifying the coordinates of each output
3498    *      bounding box for each class, with format [x1, y1, x2, y2].
3499    *      The sequential order of the boxes corresponds with output0.
3500    *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3501    *      scale must be 0.125 and the zero point must be 0.
3502    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3503    *      [num_output_rois], specifying the batch index of each box. Boxes
3504    *      with the same batch index are grouped together.
3505    *
3506    * Available since NNAPI feature level 3.
3507    */
3508   ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
3509 
3510   /**
3511    * For input tensors x and y, computes x > y elementwise.
3512    *
3513    * Supported tensor {@link OperandCode}:
3514    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3515    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3516    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3517    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3518    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3519    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3520    *
3521    * Supported tensor rank: from 1
3522    *
3523    * This operation supports broadcasting.
3524    *
3525    * Inputs:
3526    * * 0: A tensor.
3527    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3528    *      with input0.
3529    *
3530    * Outputs:
3531    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3532    *
3533    * Available since NNAPI feature level 3.
3534    */
3535   ANEURALNETWORKS_GREATER = 53,
3536   /**
3537    * For input tensors x and y, computes x >= y elementwise.
3538    *
3539    * Supported tensor {@link OperandCode}:
3540    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3541    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3542    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3543    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3544    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3545    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3546    *
3547    * Supported tensor rank: from 1
3548    *
3549    * This operation supports broadcasting.
3550    *
3551    * Inputs:
3552    * * 0: A tensor.
3553    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3554    *      with input0.
3555    *
3556    * Outputs:
3557    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3558    *
3559    * Available since NNAPI feature level 3.
3560    */
3561   ANEURALNETWORKS_GREATER_EQUAL = 54,
3562 
3563   /**
3564    * Performs a grouped 2-D convolution operation.
3565    *
3566    * Given an input tensor of shape [batches, height, width, depth_in] and a
3567    * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3568    * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3569    * applies a group of different filters to each input channel group, then
3570    * concatenates the results together.
3571    *
3572    * Specifically, the input channels are divided into num_groups groups, each with
3573    * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3574    * filters are also divided into num_groups groups, i.e. depth_out is divisible
3575    * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3576    * input channel group, and the results are concatenated together.
3577    *
3578    * The output dimensions are functions of the filter dimensions, stride, and
3579    * padding.
3580    *
3581    * The values in the output tensor are computed as:
3582    *
3583    *     output[b, i, j, g * channel_multiplier + q] =
3584    *         sum_{di, dj, dk} (
3585    *             input[b, strides[1] * i + di, strides[2] * j + dj,
3586    *                   g * depth_group + dk] *
3587    *             filter[g * channel_multiplier + q, di, dj, dk]
3588    *         ) + bias[channel]
3589    *
3590    * where channel_multiplier = depth_out / num_groups
3591    *
3592    * Supported tensor {@link OperandCode} configurations:
3593    * * 16 bit floating point:
3594    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
3595    *
3596    * * 32 bit floating point:
3597    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
3598    *
3599    * * Quantized:
3600    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
3601    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3602    * * * input.scale * filter.scale).
3603    *
3604    * * Quantized signed (since NNAPI feature level 4):
3605    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3606    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3607    * * * input.scale * filter.scale).
3608    *
3609    * * Quantized with symmetric per channel quantization for the filter:
3610    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
3611    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3612    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3613    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3614    *
3615    * * Quantized signed with filter symmetric per channel quantization
3616    *   (since NNAPI feature level 4):
3617    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3618    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3619    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3620    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3621    *
3622    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3623    * With the default data layout NHWC, the data is stored in the order of:
3624    * [batch, height, width, channels]. Alternatively, the data layout could
3625    * be NCHW, the data storage order of: [batch, channels, height, width].
3626    *
3627    * Both explicit padding and implicit padding are supported.
3628    *
3629    * Inputs (explicit padding):
3630    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3631    *      specifying the input, where depth_in = num_groups * depth_group.
3632    * * 1: A 4-D tensor, of shape
3633    *      [depth_out, filter_height, filter_width, depth_group], specifying
3634    *      the filter, where depth_out must be divisible by num_groups.  For
3635    *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3636    *      the channel dimension (channelDim at
3637    *      {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
3638    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3639    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3640    *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3641    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3642    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3643    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3644    *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3645    *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3646    *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3647    *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3648    *      bias_scale[i] = input_scale * filter_scale[i].
3649    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3650    *      the left, in the ‘width’ dimension.
3651    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3652    *      the right, in the ‘width’ dimension.
3653    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3654    *      the top, in the ‘height’ dimension.
3655    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3656    *      the bottom, in the ‘height’ dimension.
3657    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3658    *      walking through input in the ‘width’ dimension.
3659    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3660    *      walking through input in the ‘height’ dimension.
3661    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3662    *      groups.
3663    * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3664    *       {@link FuseCode} values. Specifies the activation to
3665    *       invoke on the result.
3666    * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3667    *       NCHW data layout for input0 and output0. Set to false for NHWC.
3668    *
3669    * Inputs (implicit padding):
3670    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3671    *      specifying the input, where depth_in = num_groups * depth_group.
3672    * * 1: A 4-D tensor, of shape
3673    *      [depth_out, filter_height, filter_width, depth_group], specifying
3674    *      the filter, where depth_out must be divisible by num_groups.  For
3675    *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3676    *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
3677    *      must be set to 0.
3678    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3679    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3680    *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3682    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3683    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3684    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3685    *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3686    *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3687    *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3688    *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3689    *      bias_scale[i] = input_scale * filter_scale[i].
3690    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
3691    *      padding scheme, has to be one of the
3692    *      {@link PaddingCode} values.
3693    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3694    *      walking through input in the ‘width’ dimension.
3695    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3696    *      walking through input in the ‘height’ dimension.
3697    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3698    *      groups.
3699    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3700    *      {@link FuseCode} values. Specifies the activation to
3701    *      invoke on the result.
3702    * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3703    *      NCHW data layout for input0 and output0. Set to false for NHWC.
3704    *
3705    * Outputs:
3706    * * 0: The output 4-D tensor, of shape
3707    *      [batches, out_height, out_width, depth_out].
3708    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3709    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3710    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3711    *
3712    * Available since NNAPI feature level 3.
3713    */
3714   ANEURALNETWORKS_GROUPED_CONV_2D = 55,
3715 
3716   /**
3717    * Localize the maximum keypoints from heatmaps.
3718    *
3719    * This operation approximates the accurate maximum keypoint scores and
3720    * indices after bicubic upscaling by using Taylor expansion up to the
3721    * quadratic term.
3722    *
3723    * The bounding box is represented by its upper-left corner coordinate
3724    * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3725    * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3726    *
3727    * Supported tensor {@link OperandCode}:
3728    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3729    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3730    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3731    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3732    *
3733    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3734    * With the default data layout NHWC, the data is stored in the order of:
3735    * [batch, height, width, channels]. Alternatively, the data layout could
3736    * be NCHW, the data storage order of: [batch, channels, height, width].
3737    *
3738    * Inputs:
3739    * * 0: A 4-D Tensor of shape
3740    *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3741    *      specifying the heatmaps, the height and width of heatmaps should
3742    *      be the same, and must be greater than or equal to 2.
3743    * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3744    *      each with format [x1, y1, x2, y2]. For input0 of type
3745    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should
3746    *      be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint
3747    *      of 0 and scale of 0.125.
3748    *      For input0 of type
3749    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3750    *      should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with
3751    *      zeroPoint of -128 and scale of 0.125.
3752    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3753    *      NCHW data layout for input0. Set to false for NHWC.
3754    *
3755    * Outputs:
3756    * * 0: A tensor of the same {@link OperandCode} as input0, with shape
3757    *      [num_boxes, num_keypoints], specifying score of the keypoints.
3758    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3759    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3760    *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3761    * * 1: A tensor of the same {@link OperandCode} as input1, with shape
3762    *      [num_boxes, num_keypoints, 2], specifying the location of
3763    *      the keypoints, the second dimension is organized as
3764    *      [keypoint_x, keypoint_y].
3765    *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3766    *      scale must be 0.125 and the zero point must be 0.
3767    *
3768    * Available since NNAPI feature level 3.
3769    */
3770   ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
3771 
3772   /**
3773    * Applies instance normalization to the input tensor.
3774    *
3775    * The values in the output tensor are computed as:
3776    *
3777    *     output[b, h, w, c] =
3778    *         (input[b, h, w, c] - mean[b, c]) * gamma /
3779    *         sqrt(var[b, c] + epsilon) + beta
3780    *
3781    * Where the mean and variance are computed across the spatial dimensions:
3782    *
3783    *     mean[b, c] =
3784    *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3785    *
3786    *     var[b, c] =
3787    *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3788    *
3789    * Supported tensor {@link OperandCode}:
3790    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3791    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3792    *
3793    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3794    * With the default data layout NHWC, the data is stored in the order of:
3795    * [batch, height, width, channels]. Alternatively, the data layout could
3796    * be NCHW, the data storage order of: [batch, channels, height, width].
3797    *
3798    * Inputs:
3799    * * 0: An n-D tensor, specifying the tensor to be normalized.
3800    * * 1: A scalar, specifying gamma, the scale applied to the normalized
3801    *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3802    *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3803    *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3804    *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3805    * * 2: A scalar, specifying beta, the offset applied to the normalized
3806    *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3807    *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3808    *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3809    *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3810    * * 3: A scalar, specifying epsilon, the small value added to variance to
3811    *      avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3812    *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3813    *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3814    *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3815    * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3816    *      NCHW data layout for input0 and output0. Set to false for NHWC.
3817    *
3818    * Outputs:
3819    * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3820    *
3821    * Available since NNAPI feature level 3.
3822    */
3823   ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
3824 
3825   /**
3826    * For input tensors x and y, computes x < y elementwise.
3827    *
3828    * Supported tensor {@link OperandCode}:
3829    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3830    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3831    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3832    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3833    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3834    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3835    *
3836    * Supported tensor rank: from 1
3837    *
3838    * This operation supports broadcasting.
3839    *
3840    * Inputs:
3841    * * 0: A tensor.
3842    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3843    *      with input0.
3844    *
3845    * Outputs:
3846    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3847    *
3848    * Available since NNAPI feature level 3.
3849    */
3850   ANEURALNETWORKS_LESS = 58,
3851 
3852   /**
3853    * For input tensors x and y, computes x <= y elementwise.
3854    *
3855    * Supported tensor {@link OperandCode}:
3856    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3857    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3858    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3859    * * {@link ANEURALNETWORKS_TENSOR_INT32}
3860    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3861    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3862    *
3863    * Supported tensor rank: from 1
3864    *
3865    * This operation supports broadcasting.
3866    *
3867    * Inputs:
3868    * * 0: A tensor.
3869    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3870    *      with input0.
3871    *
3872    * Outputs:
3873    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3874    *
3875    * Available since NNAPI feature level 3.
3876    */
3877   ANEURALNETWORKS_LESS_EQUAL = 59,
3878 
3879   /**
3880    * Computes natural logarithm of x element-wise.
3881    *
3882    * Supported tensor {@link OperandCode}:
3883    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3884    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3885    *
3886    * Supported tensor rank: from 1.
3887    *
3888    * Inputs:
3889    * * 0: A tensor.
3890    *
3891    * Outputs:
3892    * * 0: The output tensor of same shape as input0.
3893    *
3894    * Available since NNAPI feature level 3.
3895    */
3896   ANEURALNETWORKS_LOG = 60,
3897 
3898   /**
3899    * Returns the truth value of x AND y element-wise.
3900    *
3901    * Supported tensor {@link OperandCode}:
3902    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3903    *
3904    * Supported tensor rank: from 1
3905    *
3906    * This operation supports broadcasting.
3907    *
3908    * Inputs:
3909    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3910    * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3911    *      compatible with input0.
3912    *
3913    * Outputs:
3914    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3915    *
3916    * Available since NNAPI feature level 3.
3917    */
3918   ANEURALNETWORKS_LOGICAL_AND = 61,
3919 
3920   /**
3921    * Computes the truth value of NOT x element-wise.
3922    *
3923    * Supported tensor {@link OperandCode}:
3924    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3925    *
3926    * Supported tensor rank: from 1.
3927    *
3928    * Inputs:
3929    * * 0: A tensor.
3930    *
3931    * Outputs:
3932    * * 0: The output tensor of same shape as input0.
3933    *
3934    * Available since NNAPI feature level 3.
3935    */
3936   ANEURALNETWORKS_LOGICAL_NOT = 62,
3937 
3938   /**
3939    * Returns the truth value of x OR y element-wise.
3940    *
3941    * Supported tensor {@link OperandCode}:
3942    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3943    *
3944    * Supported tensor rank: from 1
3945    *
3946    * This operation supports broadcasting.
3947    *
3948    * Inputs:
3949    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3950    * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3951    *      compatible with input0.
3952    *
3953    * Outputs:
3954    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3955    *
3956    * Available since NNAPI feature level 3.
3957    */
3958   ANEURALNETWORKS_LOGICAL_OR = 63,
3959 
3960   /**
3961    * Computes the log softmax activations given logits.
3962    *
3963    * The output is calculated using this formula:
3964    *
3965    *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3966    *
3967    * Supported tensor {@link OperandCode}:
3968    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3969    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3970    *
3971    * Supported tensor rank: from 1.
3972    *
3973    * Inputs:
3974    * * 0: A tensor specifying the input logits.
3975    * * 1: A scalar, specifying the positive scaling factor for the exponent,
3976    *      beta.
3977    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
3978    *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
3979    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
3980    *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
3981    * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
3982    *      reduce across. Negative index is used to specify axis from the
3983    *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3984    *
3985    * Outputs:
3986    * * 0: The output tensor of the same {@link OperandCode} and shape as
3987    *      input0.
3988    *
3989    * Available since NNAPI feature level 3.
3990    */
3991   ANEURALNETWORKS_LOG_SOFTMAX = 64,
3992 
3993   /**
3994    * Returns the element-wise maximum of two tensors.
3995    *
3996    * Supported tensor {@link OperandCode}:
3997    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3998    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3999    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4000    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4001    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4002    *
4003    * Supported tensor rank: from 1.
4004    *
4005    * Inputs:
4006    * * 0: A tensor.
4007    * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4008    *      with input0.
4009    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4010    *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4011    *
4012    * Outputs:
4013    * * 0: A tensor of the same {@link OperandCode} as input0.
4014    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4015    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4016    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4017    *
4018    * Available since NNAPI feature level 3.
4019    */
4020   ANEURALNETWORKS_MAXIMUM = 65,
4021 
4022   /**
4023    * Returns the element-wise minimum of two tensors.
4024    *
4025    * Supported tensor {@link OperandCode}:
4026    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4027    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4028    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4029    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4030    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4031    *
4032    * Supported tensor rank: from 1.
4033    *
4034    * Inputs:
4035    * * 0: A tensor.
4036    * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4037    *      with input0.
4038    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4039    *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4040    *
4041    * Outputs:
4042    * * 0: A tensor of the same {@link OperandCode} as input0.
4043    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4044    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4045    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4046    *
4047    * Available since NNAPI feature level 3.
4048    */
4049   ANEURALNETWORKS_MINIMUM = 66,
4050 
4051   /**
4052    * Computes numerical negative value element-wise.
4053    *
4054    * Supported tensor {@link OperandCode}:
4055    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4056    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4057    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4058    *
4059    * Supported tensor rank: from 1.
4060    *
4061    * Inputs:
4062    * * 0: A tensor.
4063    *
4064    * Outputs:
4065    * * 0: The output tensor of same shape as input0.
4066    *
4067    * Available since NNAPI feature level 3.
4068    */
4069   ANEURALNETWORKS_NEG = 67,
4070 
4071   /**
4072    * For input tensors x and y, computes x != y elementwise.
4073    *
4074    * Supported tensor {@link OperandCode}:
4075    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4076    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4077    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4078    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4079    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4080    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4081    *
4082    * Supported tensor rank: from 1
4083    *
4084    * This operation supports broadcasting.
4085    *
4086    * Inputs:
4087    * * 0: A tensor.
4088    * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
4089    *      with input0.
4090    *
4091    * Outputs:
4092    * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
4093    *
4094    * Available since NNAPI feature level 3.
4095    */
4096   ANEURALNETWORKS_NOT_EQUAL = 68,
4097 
4098   /**
4099    * Pads a tensor with the given constant value according to the specified
4100    * paddings.
4101    *
4102    * Supported tensor {@link OperandCode}:
4103    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4104    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4105    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4106    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4107    *
4108    * Supported tensor rank: up to 4
4109    *
4110    * Inputs:
4111    * * 0: An n-D tensor, specifying the tensor to be padded.
4112    * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
4113    *      for each spatial dimension of the input tensor. The shape of the
4114    *      tensor must be {rank(input0), 2}.
4115    *      padding[i, 0] specifies the number of elements to be padded in the
4116    *      front of dimension i.
4117    *      padding[i, 1] specifies the number of elements to be padded after
4118    *      the end of dimension i.
4119    * * 2: A scalar specifying the value to use for padding input0.
4120    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
4121    *      pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
4122    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
4123    *      pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
4124    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4125    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4126    *      the pad value must be of {@link ANEURALNETWORKS_INT32}. The
4127    *      scale and zeroPoint are assumed to be the same as in input0.
4128    *
4129    * Outputs:
4130    * * 0: A tensor of the same {@link OperandCode} as input0. The
4131    *      output tensor has the same rank as input0, and each
4132    *      dimension of the output tensor has the same size as the
4133    *      corresponding dimension of the input tensor plus the size
4134    *      of the padding:
4135    *          output0.dimension[i] =
4136    *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
4137    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4138    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4139    *      the scale and zeroPoint must be the same as input0.
4140    *
4141    * Available since NNAPI feature level 3.
4142    */
4143   ANEURALNETWORKS_PAD_V2 = 69,
4144 
4145   /**
4146    * Computes the power of one value to another.
4147    *
4148    * Given a tensor base and a tensor exponent, this operation computes
4149    * base^exponent elementwise.
4150    *
4151    * This operations supports broadcasting. The size of the output is the
4152    * maximum size along each dimension of the input operands. It starts with
4153    * the trailing dimensions, and works its way forward.
4154    *
4155    * For example:
4156    *     base.dimension     =    {4, 1, 2}
4157    *     exponent.dimension = {5, 4, 3, 1}
4158    *     output.dimension   = {5, 4, 3, 2}
4159    *
4160    * Supported tensor {@link OperandCode}:
4161    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4162    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4163    *
4164    * Supported tensor rank: from 1
4165    *
4166    * Inputs:
4167    * * 0: A tensor specifying the base.
4168    * * 1: A tensor specifying the exponent.
4169    *
4170    * Outputs:
4171    * * 0: An output tensor.
4172    *
4173    * Available since NNAPI feature level 3.
4174    */
4175   ANEURALNETWORKS_POW = 70,
4176 
4177   /**
4178    * Parametric Rectified Linear Unit.
4179    *
4180    * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4181    * is a learned array with the same {@link OperandCode} and compatible
4182    * dimensions as input x.
4183    *
4184    * Two dimensions are compatible when:
4185    *     1. they are equal, or
4186    *     2. one of them is 1
4187    *
4188    * The size of the output is the maximum size along each dimension of the
4189    * input operands. It starts with the trailing dimensions, and works its way
4190    * forward.
4191    *
4192    * Example:
4193    *     input.dimension  =    {4, 1, 2}
4194    *     alpha.dimension  = {5, 4, 3, 1}
4195    *     output.dimension = {5, 4, 3, 2}
4196    *
4197    * Supported tensor {@link OperandCode}:
4198    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4199    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4200    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4201    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4202    *
4203    * Supported tensor rank: from 1
4204    *
4205    * Inputs:
4206    * * 0: A tensor, specifying the input.
4207    * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
4208    *      as input0, specifying the alpha.
4209    *
4210    * Outputs:
4211    * * 0: A tensor of the same {@link OperandCode} as input0.
4212    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4213    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4214    *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4215    *
4216    * Available since NNAPI feature level 3.
4217    */
4218   ANEURALNETWORKS_PRELU = 71,
4219 
4220   /**
4221    * Quantizes the input tensor.
4222    *
4223    * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
4224    *
4225    *     output = max(0, min(255, round(input / scale) + zeroPoint))
4226    *
4227    * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
4228    * tensor is:
4229    *
4230    *     output = max(-128, min(127, round(input / scale) + zeroPoint))
4231    *
4232    * Supported input tensor {@link OperandCode}:
4233    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4234    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4235    *
4236    * Supported output tensor {@link OperandCode}:
4237    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4238    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4239    *
4240    * Supported tensor rank: from 1
4241    *
4242    * Inputs:
4243    * * 0: A tensor, may be zero-sized.
4244    *
4245    * Outputs:
4246    * * 0: The output tensor of same shape as input0, but with
4247    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4248    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
4249    *
4250    * Available since NNAPI feature level 3.
4251    */
4252   ANEURALNETWORKS_QUANTIZE = 72,
4253 
4254   /**
4255    * A version of quantized LSTM, using 16 bit quantization for internal
4256    * state.
4257    *
4258    * There is no projection layer, so cell state size is equal to the output
4259    * size.
4260    *
4261    * Inputs:
4262    * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4263    *      and shape [numBatches, inputSize] specifying the input to the LSTM
4264    *      cell. Tensor is quantized with a fixed quantization range of
4265    *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
4266    * * 1: The input-to-input weights.
4267    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4268    *      and shape [outputSize, inputSize] specifying input-to-input part of
4269    *      weights for fully-connected layer inside the LSTM cell.
4270    *      Quantization zero point and scale must be the same across all the
4271    *      weights.
4272    * * 2: The input-to-forget weights.
4273    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4274    *      and shape [outputSize, inputSize] specifying input-to-forget part of
4275    *      weights for fully-connected layer inside the LSTM cell.
4276    *      Quantization zero point and scale must be the same across all the
4277    *      weights.
4278    * * 3: The input-to-cell weights.
4279    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4280    *      and shape [outputSize, inputSize] specifying input-to-cell part of
4281    *      weights for fully-connected layer inside the LSTM cell.
4282    *      Quantization zero point and scale must be the same across all the
4283    *      weights.
4284    * * 4: The input-to-output weights.
4285    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4286    *      and shape [outputSize, inputSize] specifying input-to-output part of
4287    *      weights for fully-connected layer inside the LSTM cell.
4288    *      Quantization zero point and scale must be the same across all the
4289    *      weights.
4290    * * 5: The recurrent-to-input weights.
4291    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4292    *      and shape [outputSize, outputSize] specifying recurrent-to-input part
4293    *      of weights for fully-connected layer inside the LSTM cell.
4294    *      Quantization zero point and scale must be the same across all the
4295    *      weights.
4296    * * 6: The recurrent-to-forget weights.
4297    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4298    *      and shape [outputSize, outputSize] specifying recurrent-to-forget
4299    *      part of weights for fully-connected layer inside the LSTM cell.
4300    *      Quantization zero point and scale must be the same across all the
4301    *      weights.
4302    * * 7: The recurrent-to-cell weights.
4303    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4304    *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
4305    *      of weights for fully-connected layer inside the LSTM cell.
4306    *      Quantization zero point and scale must be the same across all the
4307    *      weights.
4308    * * 8: The recurrent-to-output weights.
4309    *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4310    *      and shape [outputSize, outputSize] specifying recurrent-to-output
4311    *      part of weights for fully-connected layer inside the LSTM cell.
4312    *      Quantization zero point and scale must be the same across all the
4313    *      weights.
4314    * * 9: The input gate bias.
4315    *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4316    *      [outputSize] specifying the bias for the fully-connected layer
4317    *      inside the LSTM cell. Bias is quantized with scale being a product
4318    *      of input and weights scales and zeroPoint equal to 0.
4319    * * 10:The forget gate bias.
4320    *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4321    *      [outputSize] specifying the bias for the fully-connected layer
4322    *      inside the LSTM cell. Bias is quantized with scale being a product
4323    *      of input and weights scales and zeroPoint equal to 0.
4324    * * 11:The cell bias.
4325    *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4326    *      [outputSize] specifying the bias for the fully-connected layer
4327    *      inside the LSTM cell. Bias is quantized with scale being a product
4328    *      of input and weights scales and zeroPoint equal to 0.
4329    * * 12:The output gate bias.
4330    *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4331    *      [outputSize] specifying the bias for the fully-connected layer
4332    *      inside the LSTM cell. Bias is quantized with scale being a product
4333    *      of input and weights scales and zeroPoint equal to 0.
4334    * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4335    *       and shape [numBatches, outputSize] specifying the cell state from the
4336    *       previous time step of the LSTM cell. It is quantized using a
4337    *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
4338    *       32768, zeroPoint = 0).
4339    * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4340    *       and shape [numBatches, outputSize] specifying the output of the LSTM
4341    *       cell from previous time-step. Tensor is quantized with a fixed
4342    *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
4343    *       128).
4344    *
4345    *
4346    * Outputs:
4347    * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4348    *      and shape [numBatches, outputSize] which contains a cell state from
4349    *      the current time step. Tensor is quantized using a quantization
4350    *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
4351    *      0).
4352    * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4353    *      and shape [numBatches, outputSize] which contains the output value.
4354    *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
4355    *      (scale = 1/128, zeroPoint = 128).
4356    */
4357   ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
4358 
4359   /**
4360    * Draws samples from a multinomial distribution.
4361    *
4362    * Supported tensor {@link OperandCode}:
4363    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4364    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4365    *
4366    * Inputs:
4367    * * 0: A 2-D tensor with shape [batches, classes], specifying the
4368    *      unnormalized log-probabilities for all classes.
4369    * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of
4370    *      independent samples to draw for each row slice.
4371    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],
4372    *      specifying seeds used to initialize the random distribution. If both
4373    *      provided seeds are 0, both will be randomly generated.
4374    * Outputs:
4375    * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
4376    *      [batches, samples], containing the drawn samples.
4377    *
4378    * Available since NNAPI feature level 3.
4379    */
4380   ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
4381 
4382   /**
4383    * Reduces a tensor by computing the "logical and" of elements along given
4384    * dimensions.
4385    *
4386    * If keep_dims is true, the reduced dimensions are
4387    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4388    * 1 for each entry in dimensions.
4389    *
4390    * Supported tensor {@link OperandCode}:
4391    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4392    *
4393    * Supported tensor rank: up to 4
4394    *
4395    * Inputs:
4396    * * 0: An n-D tensor.
4397    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4398    *      to reduce. Dimension values must be in the range [-n, n).
4399    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4400    *      retains reduced dimensions with length 1.
4401    *
4402    * Outputs:
4403    * * 0: A tensor of the same {@link OperandCode} as input0.
4404    *      If all dimensions are reduced and keep_dims is false, the output
4405    *      shape is [1].
4406    *
4407    * Available since NNAPI feature level 3.
4408    */
4409   ANEURALNETWORKS_REDUCE_ALL = 75,
4410 
4411   /**
4412    * Reduces a tensor by computing the "logical or" of elements along given
4413    * dimensions.
4414    *
4415    * If keep_dims is true, the reduced dimensions are
4416    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4417    * 1 for each entry in dimensions.
4418    *
4419    * Supported tensor {@link OperandCode}:
4420    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4421    *
4422    * Supported tensor rank: up to 4
4423    *
4424    * Inputs:
4425    * * 0: An n-D tensor.
4426    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4427    *      to reduce. Dimension values must be in the range [-n, n).
4428    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4429    *      retains reduced dimensions with length 1.
4430    *
4431    * Outputs:
4432    * * 0: A tensor of the same {@link OperandCode} as input0.
4433    *      If all dimensions are reduced and keep_dims is false, the output
4434    *      shape is [1].
4435    *
4436    * Available since NNAPI feature level 3.
4437    */
4438   ANEURALNETWORKS_REDUCE_ANY = 76,
4439 
4440   /**
4441    * Reduces a tensor by computing the maximum of elements along given
4442    * dimensions.
4443    *
4444    * If keep_dims is true, the reduced dimensions are
4445    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4446    * 1 for each entry in dimensions.
4447    *
4448    * Supported tensor {@link OperandCode}:
4449    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4450    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4451    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4452    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4453    *
4454    * Supported tensor rank: up to 4
4455    *
4456    * Inputs:
4457    * * 0: An n-D tensor.
4458    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4459    *      to reduce. Dimension values must be in the range [-n, n).
4460    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4461    *      retains reduced dimensions with length 1.
4462    *
4463    * Outputs:
4464    * * 0: A tensor of the same {@link OperandCode} as input0.
4465    *      If all dimensions are reduced and keep_dims is false, the output
4466    *      shape is [1].
4467    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4468    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4469    *      the scale and zeroPoint must be the same as input0.
4470    *
4471    * Available since NNAPI feature level 3.
4472    */
4473   ANEURALNETWORKS_REDUCE_MAX = 77,
4474 
4475   /**
4476    * Reduces a tensor by computing the minimum of elements along given
4477    * dimensions.
4478    *
4479    * If keep_dims is true, the reduced dimensions are
4480    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4481    * 1 for each entry in dimensions.
4482    *
4483    * Supported tensor {@link OperandCode}:
4484    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4485    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4486    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4487    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4488    *
4489    * Supported tensor rank: up to 4
4490    *
4491    * Inputs:
4492    * * 0: An n-D tensor.
4493    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4494    *      to reduce. Dimension values must be in the range [-n, n).
4495    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4496    *      retains reduced dimensions with length 1.
4497    *
4498    * Outputs:
4499    * * 0: A tensor of the same {@link OperandCode} as input0.
4500    *      If all dimensions are reduced and keep_dims is false, the output
4501    *      shape is [1].
4502    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4503    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4504    *      the scale and zeroPoint must be the same as input0.
4505    *
4506    * Available since NNAPI feature level 3.
4507    */
4508   ANEURALNETWORKS_REDUCE_MIN = 78,
4509 
4510   /**
4511    * Reduces a tensor by multiplying elements along given dimensions.
4512    *
4513    * If keep_dims is true, the reduced dimensions are
4514    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4515    * 1 for each entry in dimensions.
4516    *
4517    * Supported tensor {@link OperandCode}:
4518    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4519    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4520    *
4521    * Supported tensor rank: up to 4
4522    *
4523    * Inputs:
4524    * * 0: An n-D tensor.
4525    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4526    *      to reduce. Dimension values must be in the range [-n, n).
4527    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4528    *      retains reduced dimensions with length 1.
4529    *
4530    * Outputs:
4531    * * 0: A tensor of the same {@link OperandCode} as input0.
4532    *      If all dimensions are reduced and keep_dims is false, the output
4533    *      shape is [1].
4534    *
4535    * Available since NNAPI feature level 3.
4536    */
4537   ANEURALNETWORKS_REDUCE_PROD = 79,
4538 
4539   /**
4540    * Reduces a tensor by summing elements along given dimensions.
4541    *
4542    * If keep_dims is true, the reduced dimensions are
4543    * retained with length 1. Otherwise, the rank of the tensor is reduced by
4544    * 1 for each entry in dimensions.
4545    *
4546    * Supported tensor {@link OperandCode}:
4547    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4548    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4549    *
4550    * Supported tensor rank: up to 4
4551    *
4552    * Inputs:
4553    * * 0: An n-D tensor.
4554    * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4555    *      to reduce. Dimension values must be in the range [-n, n).
4556    * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4557    *      retains reduced dimensions with length 1.
4558    *
4559    * Outputs:
4560    * * 0: A tensor of the same {@link OperandCode} as input0.
4561    *      If all dimensions are reduced and keep_dims is false, the output
4562    *      shape is [1].
4563    *
4564    * Available since NNAPI feature level 3.
4565    */
4566   ANEURALNETWORKS_REDUCE_SUM = 80,
4567 
4568   /**
4569    * Select and scale the feature map of each region of interest to a unified
4570    * output size by average pooling sampling points from bilinear interpolation.
4571    *
4572    * The region of interest is represented by its upper-left corner coordinate
4573    * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4574    * A spatial scaling factor is applied to map into feature map coordinate.
4575    * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4576    *
4577    * No rounding is applied in this operation. The sampling points are uniformly
4578    * distributed in the pooling bin and their values are calculated by bilinear
4579    * interpolation.
4580    *
4581    * Supported tensor {@link OperandCode}:
4582    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4583    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4584    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4585    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4586    *
4587    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4588    * With the default data layout NHWC, the data is stored in the order of:
4589    * [batch, height, width, channels]. Alternatively, the data layout could
4590    * be NCHW, the data storage order of: [batch, channels, height, width].
4591    *
4592    * Inputs:
4593    * * 0: A 4-D tensor, specifying the feature map.
4594    * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4595    *      the regions of interest, each line with format [x1, y1, x2, y2].
4596    *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
4597    *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4598    *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4599    *      supported for this tensor.
4600    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4601    *      [num_rois], specifying the batch index of each box. Boxes with
4602    *      the same batch index are grouped together. Zero num_rois is
4603    *      supported for this tensor.
4604    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4605    *      height of the output tensor.
4606    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4607    *      width of the output tensor.
4608    * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4609    *      from the height of original image to the height of feature map.
4610    * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4611    *      from the width of original image to the width of feature map.
4612    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4613    *      sampling points in height dimension used to compute the output.
4614    *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4615    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4616    *      sampling points in width dimension used to compute the output.
4617    *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4618    * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4619    *      NCHW data layout for input0 and output0. Set to false for NHWC.
4620    *
4621    * Outputs:
4622    * * 0: A tensor of the same {@link OperandCode} as input0. The output
4623    *      shape is [num_rois, out_height, out_width, depth].
4624    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4625    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4626    *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4627    *
4628    * Available since NNAPI feature level 3.
4629    */
4630   ANEURALNETWORKS_ROI_ALIGN = 81,
4631 
4632   /**
4633    * Select and scale the feature map of each region of interest to a unified
4634    * output size by max-pooling.
4635    *
4636    * The region of interest is represented by its upper-left corner coordinate
4637    * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4638    * A spatial scaling factor is applied to map into feature map coordinate.
4639    * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4640    *
4641    * Rounding is applied in this operation to ensure integer boundary for
4642    * regions of interest and pooling bins.
4643    *
4644    * Supported tensor {@link OperandCode}:
4645    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4646    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4647    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4648    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4649    *
4650    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4651    * With the default data layout NHWC, the data is stored in the order of:
4652    * [batch, height, width, channels]. Alternatively, the data layout could
4653    * be NCHW, the data storage order of: [batch, channels, height, width].
4654    *
4655    * Inputs:
4656    * * 0: A 4-D tensor, specifying the feature map.
4657    * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4658    *      the regions of interest, each line with format [x1, y1, x2, y2].
4659    *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4660    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4661    *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4662    *      with zeroPoint of 0 and scale of 0.125.
4663    * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4664    *      [num_rois], specifying the batch index of each box. Boxes with
4665    *      the same batch index are grouped together.
4666    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4667    *      height of the output tensor.
4668    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4669    *      width of the output tensor.
4670    * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4671    *      from the height of original image to the height of feature map.
4672    * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4673    *      from the width of original image to the width of feature map.
4674    * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4675    *      NCHW data layout for input0 and output0. Set to false for NHWC.
4676    *
4677    * Outputs:
4678    * * 0: A tensor of the same {@link OperandCode} as input0. The output
4679    *      shape is [num_rois, out_height, out_width, depth].
4680    *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4681    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4682    *      the scale and zeroPoint must be the same as input0.
4683    *
4684    * Available since NNAPI feature level 3.
4685    */
4686   ANEURALNETWORKS_ROI_POOLING = 82,
4687 
4688   /**
4689    * Computes reciprocal of square root of x element-wise.
4690    *
4691    * Supported tensor {@link OperandCode}:
4692    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4693    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4694    *
4695    * Supported tensor rank: from 1.
4696    *
4697    * Inputs:
4698    * * 0: A tensor.
4699    *
4700    * Outputs:
4701    * * 0: The output tensor of same shape as input0.
4702    *
4703    * Available since NNAPI feature level 3.
4704    */
4705   ANEURALNETWORKS_RSQRT = 83,
4706 
4707   /**
4708    * Using a tensor of booleans c and input tensors x and y select values
4709    * elementwise from both input tensors:
4710    *
4711    * O[i] = C[i] ? x[i] : y[i].
4712    *
4713    * Supported tensor {@link OperandCode}:
4714    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4715    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4716    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4717    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4718    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4719    *
4720    * Supported tensor rank: from 1
4721    *
4722    * Inputs:
4723    * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
4724    *      mask that chooses, based on the value at each element, whether the
4725    *      corresponding element in the output should be taken from input1 (if
4726    *      true) or input2 (if false).
4727    * * 1: An input tensor of the same shape as input0.
4728    * * 2: An input tensor of the same shape and type as input1.
4729    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4730    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4731    *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4732    *
4733    * Outputs:
4734    * * 0: A tensor of the same type and shape as input1 and input2.
4735    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4736    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4737    *
4738    * Available since NNAPI feature level 3.
4739    */
4740   ANEURALNETWORKS_SELECT = 84,
4741 
4742   /**
4743    * Computes sin of x element-wise.
4744    *
4745    * Supported tensor {@link OperandCode}:
4746    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4747    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4748    *
4749    * Supported tensor rank: from 1.
4750    *
4751    * Inputs:
4752    * * 0: A tensor.
4753    *
4754    * Outputs:
4755    * * 0: The output tensor of same shape as input0.
4756    *
4757    * Available since NNAPI feature level 3.
4758    */
4759   ANEURALNETWORKS_SIN = 85,
4760 
4761   /**
4762    * Extracts a slice of specified size from the input tensor starting at a
4763    * specified location.
4764    *
4765    * The starting location is specified as a 1-D tensor containing offsets
4766    * for each dimension. The size is specified as a 1-D tensor containing
4767    * either size of a slice along corresponding dimension or -1. In the latter
4768    * case, all the remaining elements in dimension are included in the slice.
4769    *
4770    * A sum of begin offset and a size of a slice must not exceed size of a
4771    * corresponding dimension.
4772    *
4773    * Supported tensor {@link OperandCode}:
4774    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4775    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4776    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4777    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4778    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4779    *
4780    * Supported tensor rank: from 1
4781    *
4782    * Inputs:
4783    * * 0: An n-D tensor to take slice from, may be zero-sized.
4784    * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4785    *      the beginning indices of the slice in each dimension.
4786    * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4787    *      the size of the slice in each dimension.
4788    *
4789    * Outputs:
4790    * * 0: An n-D tensor of the same type as the input containing the slice.
4791    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4792    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4793    *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4794    *
4795    * Available since NNAPI feature level 3.
4796    */
4797   ANEURALNETWORKS_SLICE = 86,
4798 
4799   /**
4800    * Splits a tensor along a given axis into num_splits subtensors.
4801    *
4802    * Supported tensor {@link OperandCode}:
4803    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4804    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4805    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4806    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4807    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4808    *
4809    * Supported tensor rank: from 1
4810    *
4811    * Inputs:
4812    * * 0: An n-D tensor to split.
4813    * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
4814    *      which to split.
4815    * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
4816    *      splits along given axis. Must evenly divide axis size.
4817    *
4818    * Outputs:
4819    * * 0 ~ (num_splits - 1): Resulting subtensors.
4820    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4821    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4822    *      the scale and zeroPoint must be the same as input0.
4823    *
4824    * Available since NNAPI feature level 3.
4825    */
4826   ANEURALNETWORKS_SPLIT = 87,
4827 
4828   /**
4829    * Computes square root of x element-wise.
4830    *
4831    * Supported tensor {@link OperandCode}:
4832    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4833    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4834    *
4835    * Supported tensor rank: from 1.
4836    *
4837    * Inputs:
4838    * * 0: A tensor.
4839    *
4840    * Outputs:
4841    * * 0: The output tensor of same shape as input0.
4842    *
4843    * Available since NNAPI feature level 3.
4844    */
4845   ANEURALNETWORKS_SQRT = 88,
4846 
4847   /**
4848    * Constructs a tensor by tiling a given tensor.
4849    *
4850    * This operation creates a new tensor by replicating `input` `multiples`
4851    * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4852    * elements, and the values of `input` are replicated `multiples[i]` times
4853    * along the i-th dimension.
4854    * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4855    *
4856    * Supported tensor {@link OperandCode}:
4857    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4858    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4859    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4860    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4861    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4862    *
4863    * Supported tensor rank: from 1
4864    *
4865    * Inputs:
4866    * * 0: input, an n-D tensor specifying the input.
4867    * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
4868    *      The length of multiples must be n.
4869    *
4870    * Outputs:
4871    * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
4872    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4873    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4874    *      the scale and zeroPoint must be the same as input0.
4875    *
4876    * Available since NNAPI feature level 3.
4877    */
4878   ANEURALNETWORKS_TILE = 89,
4879 
4880   /**
4881    * Finds values and indices of the k largest entries for the last dimension.
4882    *
4883    * Resulting values in each dimension are sorted in descending order. If
4884    * two values are equal, the one with larger index appears first.
4885    *
4886    * Supported tensor {@link OperandCode}:
4887    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4888    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4889    * * {@link ANEURALNETWORKS_TENSOR_INT32}
4890    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4891    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4892    *
4893    * Supported tensor rank: from 1
4894    *
4895    * Inputs:
4896    * * 0: input, an n-D tensor specifying the input.
4897    * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4898    *      top elements to look for along the last dimension.
4899    *
4900    * Outputs:
4901    * * 0: An n-D tensor of the same type as the input, containing the k
4902    *      largest elements along each last dimensional slice.
4903    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4904    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4905    *      the scale and zeroPoint must be the same as input0.
4906    * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
4907    *      containing the indices of values within the last dimension of input.
4908    *
4909    * Available since NNAPI feature level 3.
4910    */
4911   ANEURALNETWORKS_TOPK_V2 = 90,
4912 
4913   /**
4914    * Performs the transpose of 2-D convolution operation.
4915    *
4916    * This operation is sometimes called "deconvolution" after Deconvolutional
4917    * Networks, but is actually the transpose (gradient) of
4918    * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
4919    *
4920    * The output dimensions are functions of the filter dimensions, stride, and
4921    * padding.
4922    *
4923    * Supported tensor {@link OperandCode} configurations:
4924    * * 16 bit floating point:
4925    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
4926    *
4927    * * 32 bit floating point:
4928    * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
4929    *
4930    * * Quantized:
4931    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
4932    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4933    * * * input.scale * filter.scale).
4934    *
4935    * * Quantized with symmetric per channel quantization for the filter:
4936    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
4937    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4938    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4939    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4940    *
4941    * Available since NNAPI feature level 4:
4942    * * Quantized signed (since NNAPI feature level 4):
4943    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4944    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4945    * * * input.scale * filter.scale).
4946    *
4947    * * Quantized signed with filter symmetric per channel quantization
4948    *   (since NNAPI feature level 4):
4949    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4950    * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4951    * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4952    * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4953    *
4954    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4955    * With the default data layout NHWC, the data is stored in the order of:
4956    * [batch, height, width, channels]. Alternatively, the data layout could
4957    * be NCHW, the data storage order of: [batch, channels, height, width].
4958    *
4959    * Both explicit padding and implicit padding are supported.
4960    *
4961    * Inputs (explicit padding):
4962    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4963    *      specifying the input.
4964    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
4965    * * 1: A 4-D tensor, of shape
4966    *      [depth_out, filter_height, filter_width, depth_in], specifying the
4967    *      filter. For tensor of type
4968    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4969    *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
4970    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4971    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
4972    *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the
4973    *      same type.
4974    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4975    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4976    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
4977    *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4978    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
4979    *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
4980    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4981    *      bias_scale[i] = input_scale * filter_scale[i].
4982    * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4983    *      the left, in the ‘width’ dimension.
4984    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4985    *      the right, in the ‘width’ dimension.
4986    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4987    *      the top, in the ‘height’ dimension.
4988    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4989    *      the bottom, in the ‘height’ dimension.
4990    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4991    *      walking through input in the ‘width’ dimension.
4992    * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4993    *      walking through input in the ‘height’ dimension.
4994    * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
4995    *      {@link FuseCode} values. Specifies the activation to
4996    *      invoke on the result.
4997    * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4998    *       NCHW data layout for input0 and output0. Set to false for NHWC.
4999    *
5000    * Inputs (implicit padding):
5001    * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
5002    *      specifying the input.
5003    *      Since NNAPI feature level 3, zero batches is supported for this tensor.
5004    * * 1: A 4-D tensor, of shape
5005    *      [depth_out, filter_height, filter_width, depth_in], specifying the
5006    *      filter. For tensor of type
5007    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
5008    *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
5009    * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
5010    *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
5011    *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
5012    *      same type.
5013    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5014    *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
5015    *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
5016    *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5017    *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
5018    *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
5019    *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5020    *      bias_scale[i] = input_scale * filter_scale[i].
5021    * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output
5022    *      tensor shape.
5023    * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
5024    *      padding scheme, has to be one of the
5025    *      {@link PaddingCode} values.
5026    * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5027    *      walking through input in the ‘width’ dimension.
5028    * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5029    *      walking through input in the ‘height’ dimension.
5030    * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
5031    *      {@link FuseCode} values. Specifies the activation to
5032    *      invoke on the result.
5033    * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
5034    *      NCHW data layout for input0 and output0. Set to false for NHWC.
5035    *
5036    * Outputs:
5037    * * 0: The output 4-D tensor, of shape
5038    *      [batches, out_height, out_width, depth_out].
5039    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5040    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5041    *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5042    *
5043    * Available since NNAPI feature level 3.
5044    */
5045   ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
5046 
5047   /**
5048    * A recurrent neural network specified by an LSTM cell.
5049    *
5050    * Performs (fully) dynamic unrolling of input.
5051    *
5052    * This Op unrolls the input along the time dimension, and implements the
5053    * following operation for each element in the sequence
5054    * s = 1...sequence_length:
5055    *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
5056    *
5057    * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},
5058    * the "projection" is an optional projection layer from state and output
5059    * and the “activation” is the function passed as the
5060    * “fused_activation_function” argument (if not “NONE”).
5061    *
5062    * Supported tensor {@link OperandCode}:
5063    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5064    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5065    *
5066    * Supported tensor rank: 3, either time-major or batch-major.
5067    *
5068    * All input and output tensors must be of the same type.
5069    *
5070    * Inputs:
5071    * * 0: The input (\f$x_t\f$).
5072    *      A 3-D tensor of shape:
5073    *        If time-major: [max_time, batch_size, input_size]
5074    *        If batch-major: [batch_size, max_time, input_size]
5075    *      where “max_time” is the number of timesteps (sequence length),
5076    *      “batch_size” corresponds to the batching dimension, and
5077    *      “input_size” is the size of the input.
5078    * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
5079    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
5080    *      corresponds to the number of cell units.
5081    * * 2: The input-to-forget weights (\f$W_{xf}\f$).
5082    *      A 2-D tensor of shape [num_units, input_size].
5083    * * 3: The input-to-cell weights (\f$W_{xc}\f$).
5084    *      A 2-D tensor of shape [num_units, input_size].
5085    * * 4: The input-to-output weights (\f$W_{xo}\f$).
5086    *      A 2-D tensor of shape [num_units, input_size].
5087    * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
5088    *      A 2-D tensor of shape [num_units, output_size], where “output_size”
5089    *      corresponds to either the number of cell units (i.e., “num_units”),
5090    *      or the second dimension of the “projection_weights”, if defined.
5091    * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
5092    *      A 2-D tensor of shape [num_units, output_size].
5093    * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
5094    *      A 2-D tensor of shape [num_units, output_size].
5095    * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
5096    *      A 2-D tensor of shape [num_units, output_size].
5097    * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
5098    *      A 1-D tensor of shape [num_units].
5099    * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
5100    *      A 1-D tensor of shape [num_units].
5101    * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
5102    *      A 1-D tensor of shape [num_units].
5103    * * 12:The input gate bias (\f$b_i\f$). Optional.
5104    *      A 1-D tensor of shape [num_units].
5105    * * 13:The forget gate bias (\f$b_f\f$).
5106    *      A 1-D tensor of shape [num_units].
5107    * * 14:The cell bias (\f$b_c\f$).
5108    *      A 1-D tensor of shape [num_units].
5109    * * 15:The output gate bias (\f$b_o\f$).
5110    *      A 1-D tensor of shape [num_units].
5111    * * 16:The projection weights (\f$W_{proj}\f$). Optional.
5112    *      A 2-D tensor of shape [output_size, num_units].
5113    * * 17:The projection bias (\f$b_{proj}\f$). Optional.
5114    *      A 1-D tensor of shape [output_size].
5115    * * 18:The output state (in) (\f$h_{t-1}\f$).
5116    *      A 2-D tensor of shape [batch_size, output_size].
5117    * * 19:The cell state (in) (\f$C_{t-1}\f$).
5118    *      A 2-D tensor of shape [batch_size, num_units].
5119    * * 20:The activation function (\f$g\f$).
5120    *      A value indicating the activation function:
5121    *      <ul>
5122    *      <li>0: None;
5123    *      <li>1: Relu;
5124    *      <li>3: Relu6;
5125    *      <li>4: Tanh;
5126    *      <li>6: Sigmoid.
5127    *      </ul>
5128    * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
5129    *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
5130    *      then clipping is disabled.
5131    * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
5132    *      projection layer, such that values are bound within
5133    *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
5134    * * 23:Time-major if true, batch-major if false.
5135    * * 24:The input layer normalization weights. Optional.
5136    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5137    *      to activation at input gate.
5138    * * 25:The forget layer normalization weights. Optional.
5139    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5140    *      to activation at forget gate.
5141    * * 26:The cell layer normalization weights. Optional.
5142    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5143    *      to activation at cell gate.
5144    * * 27:The output layer normalization weights. Optional.
5145    *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5146    *      to activation at output gate.
5147    *
5148    * Outputs:
5149    * * 0: The output (\f$o_t\f$).
5150    *      A 3-D tensor of shape:
5151    *        If time-major: [max_time, batch_size, output_size]
5152    *        If batch-major: [batch_size, max_time, output_size]
5153    * * 1: A tensor of shape [batch_size, output_size] containing a hidden
5154    *      state from the last time step in the sequence. This output is
5155    *      optional and can be omitted. If this output is present then
5156    *      output #2 must be present as well.
5157    *      Available since NNAPI feature level 4.
5158    * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
5159    *      from the last time step in the sequence. This output is optional
5160    *      and can be omitted.
5161    *      Available since NNAPI feature level 4.
5162    *
5163    * Available since NNAPI feature level 3.
5164    *
5165    * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5166    * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5167    * in which multiple cells are chained and state tensors are propagated.
5168    */
5169   ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
5170 
5171   /**
5172    * A recurrent neural network layer that applies a basic RNN cell to a
5173    * sequence of inputs.
5174    *
5175    * This layer unrolls the input along the sequence dimension, and implements
5176    * the following operation
5177    * for each element in the sequence s = 1...sequence_length:
5178    *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
5179    *   recurrent_weights’ + bias)
5180    *
5181    * Where:
5182    * * “input_weights” is a weight matrix that multiplies the inputs;
5183    * * “recurrent_weights” is a weight matrix that multiplies the current
5184    *    “state” which itself is the output from the previous time step
5185    *    computation;
5186    * * “bias” is a bias vector (added to each output vector in the batch);
5187    * * “activation” is the function passed as the “fused_activation_function”
5188    *   argument (if not “NONE”).
5189    *
5190    * Supported tensor {@link OperandCode}:
5191    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5192    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5193    *
5194    * The input tensors must all be the same type.
5195    *
5196    * Inputs:
5197    * * 0: input.
5198    *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5199    *      it is set to 1, then the input has a shape [maxTime, batchSize,
5200    *      inputSize], otherwise the input has a shape [batchSize, maxTime,
5201    *      inputSize].
5202    * * 1: weights.
5203    *      A 2-D tensor of shape [numUnits, inputSize].
5204    * * 2: recurrent_weights.
5205    *      A 2-D tensor of shape [numUnits, numUnits].
5206    * * 3: bias.
5207    *      A 1-D tensor of shape [numUnits].
5208    * * 4: hidden state
5209    *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
5210    *      state input for the first time step of the computation.
5211    * * 5: fusedActivationFunction.
5212    *      A {@link FuseCode} value indicating the activation function. If
5213    *      “NONE” is specified then it results in a linear activation.
5214    * * 6: timeMajor
5215    *      An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
5216    *      of input and output tensors. Must be set to either 0 or 1.
5217    * Outputs:
5218    * * 0: output.
5219    *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5220    *      it is set to 1, then the output has a shape [maxTime, batchSize,
5221    *      numUnits], otherwise the output has a shape [batchSize, maxTime,
5222    *      numUnits].
5223    * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
5224    *      from the last time step in the sequence. This output is optional
5225    *      and can be omitted.
5226    *      Available since NNAPI feature level 4.
5227    *
5228    * Available since NNAPI feature level 3.
5229    *
5230    * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5231    * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5232    * in which multiple cells are chained and state tensors are propagated.
5233    */
5234   ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
5235 
5236   /**
5237    * Resizes images to given size using the nearest neighbor interpolation.
5238    *
5239    * Resized images must be distorted if their output aspect ratio is not the
5240    * same as input aspect ratio. The corner pixels of output may not be the
5241    * same as corner pixels of input.
5242    *
5243    * Supported tensor {@link OperandCode}:
5244    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5245    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5246    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5247    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
5248    *
5249    * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5250    * With the default data layout NHWC, the data is stored in the order of:
5251    * [batch, height, width, channels]. Alternatively, the data layout could
5252    * be NCHW, the data storage order of: [batch, channels, height, width].
5253    *
5254    * Both resizing by shape and resizing by scale are supported.
5255    *
5256    * Inputs (resizing by shape):
5257    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5258    *      the input. Zero batches is supported for this tensor.
5259    * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5260    *      width of the output tensor.
5261    * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5262    *      height of the output tensor.
5263    * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5264    *      Set to true to specify NCHW data layout for input0 and output0.
5265    * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5266    *      scalar, default to false.  If True, the centers of the 4 corner
5267    *      pixels of the input and output tensors are aligned, preserving the
5268    *      values at the corner pixels.
5269    *      Available since NNAPI feature level 4.
5270    * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5271    *      scalar, default to false. If True, the pixel centers are assumed to
5272    *      be at (0.5, 0.5). This is the default behavior of image.resize in
5273    *      TF 2.0. If this parameter is True, then align_corners parameter
5274    *      must be False.
5275    *      Available since NNAPI feature level 4.
5276    *
5277    * Inputs (resizing by scale):
5278    * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5279    *      the input. Zero batches is supported for this tensor.
5280    * * 1: A scalar, specifying width_scale, the scaling factor of the width
5281    *      dimension from the input tensor to the output tensor. The output
5282    *      width is calculated as new_width = floor(width * width_scale).
5283    *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5284    *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5285    *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5286    * * 2: A scalar, specifying height_scale, the scaling factor of the height
5287    *      dimension from the input tensor to the output tensor. The output
5288    *      height is calculated as new_height = floor(height * height_scale).
5289    *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5290    *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5291    *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5292    * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5293    *      Set to true to specify NCHW data layout for input0 and output0.
5294    * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5295    *      scalar, default to false.  If True, the centers of the 4 corner
5296    *      pixels of the input and output tensors are aligned, preserving the
5297    *      values at the corner pixels.
5298    *      Available since NNAPI feature level 4.
5299    * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5300    *      scalar, default to false. If True, the pixel centers are assumed to
5301    *      be at (0.5, 0.5). This is the default behavior of image.resize in
5302    *      TF 2.0. If this parameter is True, then align_corners parameter
5303    *      must be False.
5304    *      Available since NNAPI feature level 4.
5305    *
5306    * Outputs:
5307    * * 0: The output 4-D tensor, of shape
5308    *      [batches, new_height, new_width, depth].
5309    *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5310    *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5311    *      the scale and zeroPoint must be the same as input0.
5312    *
5313    * Available since NNAPI feature level 3.
5314    */
5315   ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
5316 
5317   // Operations below are available since NNAPI feature level 4.
5318 
5319   /**
5320    * Quantized version of {@link ANEURALNETWORKS_LSTM}.
5321    *
5322    * The input and the output use asymmetric quantized types, while the rest
5323    * use symmetric ones.
5324    *
5325    * Inputs:
5326    * * 0: The input to the LSTM cell.
5327    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5328    *      Shape: [batchSize, inputSize]
5329    * * 1: The input-to-input weights. Optional.
5330    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5331    *      Shape: [numUnits, inputSize]
5332    * * 2: The input-to-forget weights.
5333    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5334    *      Shape: [numUnits, inputSize]
5335    * * 3: The input-to-cell weights.
5336    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5337    *      Shape: [numUnits, inputSize]
5338    * * 4: The input-to-output weights.
5339    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5340    *      Shape: [numUnits, inputSize]
5341    * * 5: The recurrent-to-input weights. Optional.
5342    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5343    *      Shape: [numUnits, outputSize]
5344    * * 6: The recurrent-to-forget weights.
5345    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5346    *      Shape: [numUnits, outputSize]
5347    * * 7: The recurrent-to-cell weights.
5348    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5349    *      Shape: [numUnits, outputSize]
5350    * * 8: The recurrent-to-output weights.
5351    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5352    *      Shape: [numUnits, outputSize]
5353    * * 9: The cell-to-input weights (for peephole). Optional.
5354    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5355    *      Shape: [numUnits]
5356    * * 10: The cell-to-forget weights (for peephole). Optional.
5357    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5358    *       Shape: [numUnits]
5359    * * 11: The cell-to-output weights (for peephole). Optional.
5360    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5361    *       Shape: [numUnits]
5362    * * 12: The input gate bias. Quantized with scale being the
5363    *       product of input and weights scales and zeroPoint equal to 0.
5364    *       Optional.
5365    *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5366    *       Shape: [numUnits]
5367    * * 13: The forget gate bias. Quantized with scale being the
5368    *       product of input and weights scales and zeroPoint equal to 0.
5369    *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5370    *       Shape: [numUnits]
5371    * * 14: The cell bias. Quantized with scale being the
5372    *       product of input and weights scales and zeroPoint equal to 0.
5373    *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5374    *       Shape: [numUnits]
5375    * * 15: The output gate bias. Quantized with scale being the
5376    *       product of input and weights scales and zeroPoint equal to 0.
5377    *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5378    *       Shape: [numUnits]
5379    * * 16: The projection weights. Optional.
5380    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5381    *       Shape: [outputSize, numUnits]
5382    * * 17: The projection bias. Quantized with scale being the
5383    *       product of input and weights scales and zeroPoint equal to 0.
5384    *       Optional.
5385    *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5386    *       Shape: [outputSize]
5387    * * 18: The output from the previous time step.
5388    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5389    *       Shape: [batchSize, outputSize]
5390    * * 19: The cell state from the previous time step.
5391    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5392    *       Shape: [batchSize, numUnits]
5393    * * 20: The input layer normalization weights. Used to rescale
5394    *       normalized inputs to activation at input gate. Optional.
5395    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5396    *       Shape: [numUnits]
5397    * * 21: The forget layer normalization weights. Used to
5398    *       rescale normalized inputs to activation at forget gate. Optional.
5399    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5400    *       Shape: [numUnits]
5401    * * 22: The cell layer normalization weights. Used to rescale
5402    *       normalized inputs to activation at cell gate. Optional.
5403    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5404    *       Shape: [numUnits]
5405    * * 23: The output layer normalization weights. Used to
5406    *       rescale normalized inputs to activation at output gate. Optional.
5407    *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5408    *       Shape: [numUnits]
5409    * * 24: The cell clip. If provided the cell state is clipped
5410    *       by this value prior to the cell output activation. Optional.
5411    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5412    * * 25: The projection clip. If provided and projection is enabled,
5413    *       this is used for clipping the projected values. Optional.
5414    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5415    * * 26: The scale of the intermediate result of matmul,
5416    *       i.e. input to layer normalization, at input gate.
5417    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5418    * * 27: The scale of the intermediate result of matmul,
5419    *       i.e. input to layer normalization, at forget gate.
5420    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5421    * * 28: The scale of the intermediate result of matmul,
5422    *       i.e. input to layer normalization, at cell gate.
5423    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5424    * * 29: The scale of the intermediate result of matmul,
5425    *       i.e. input to layer normalization, at output gate.
5426    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5427    * * 30: The zero point of the hidden state, i.e. input to
5428    *       projection.
5429    *       Type: {@link ANEURALNETWORKS_INT32}.
5430    * * 31: The scale of the hidden state, i.e. input to
5431    *       projection.
5432    *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5433    *
5434    * Outputs:
5435    * * 0: The output state (out).
5436    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5437    *      Shape: [batchSize, outputSize]
5438    * * 1: The cell state (out).
5439    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5440    *      Shape: [batchSize, numUnits]
5441    * * 2: The output. This is effectively the same as the current
5442    *      "output state (out)" value.
5443    *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5444    *      Shape: [batchSize, outputSize]
5445    *
5446    * Available since NNAPI feature level 4.
5447    */
5448   ANEURALNETWORKS_QUANTIZED_LSTM = 95,
5449 
5450   /**
5451    * Executes one of the two referenced models as determined by a boolean
5452    * value.
5453    *
5454    * The inputs and outputs of the two referenced models must agree with the
5455    * signature of this operation. That is, if the operation has (3 + n) inputs
5456    * and m outputs, both models must have n inputs and m outputs with the same
5457    * types, ranks (if specified), dimensions (if specified), scales,
5458    * zeroPoints, and other operand parameters as the corresponding operation
5459    * inputs and outputs.
5460    *
5461    * Inputs:
5462    * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]
5463    *      that determines which of the two referenced models to execute.
5464    *      The operand must have fully specified dimensions.
5465    * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5466    *      executed if the condition is true.
5467    * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5468    *      executed if the condition is false.
5469    * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution.
5470    *
5471    * Outputs:
5472    * * 0 ~ (m - 1): Outputs produced by the selected model.
5473    *
5474    * Available since NNAPI feature level 4.
5475    */
5476   ANEURALNETWORKS_IF = 96,
5477 
5478   /**
5479    * Executes the body model until the condition model outputs false.
5480    *
5481    * The inputs to this operation are the condition model, the body model,
5482    * and operand values for the first iteration of the loop. The values are
5483    * implicitly split into three groups of input-output, state-only, and
5484    * input-only values, as described below.
5485    *
5486    * The outputs of this operation are the final values of input-output
5487    * operands.
5488    *
5489    * Both the condition and body model receive (m + k + n) inputs.
5490    * * The first m (m >= 1) inputs are input-output operands. For the first
5491    *   iteration, these are initialized from the corresponding inputs of the
5492    *   WHILE operation. In subsequent iterations, their values come from the
5493    *   corresponding outputs of the body model produced during the previous
5494    *   iteration.
5495    * * The next k (k >= 0) inputs are state-only operands. They are similar to
5496    *   the input-output operands, except that their values are no longer
5497    *   available after the loop terminates.
5498    * * The last n (n >= 0) inputs are input-only operands. Their values come
5499    *   from the corresponding inputs of the WHILE operation.
5500    *
5501    * The body model produces (m + k) outputs.
5502    * * The first m outputs are input-output operands. They become the outputs
5503    *   of the WHILE operation when a termination condition is reached.
5504    * * The last k outputs are state-only operands. Their values are no longer
5505    *   available after the loop terminates.
5506    *
5507    * The numbers m, k, and n are inferred by the runtime as follows:
5508    *     m = (WHILE operation output count)
5509    *     k = (body model output count) - m
5510    *     n = (body model input count) - m - k
5511    *
5512    * The pseudo-code below illustrates the flow of a WHILE operation with
5513    * inputs condition, body, initial_input_output, initial_state, input_only
5514    * (m = 1, k = 1, n = 1):
5515    *
5516    *     input_output = initial_input_output
5517    *     state = initial_state
5518    *     while condition(input_output, state, input_only):
5519    *         input_output, state = body(input_output, state, input_only)
5520    *     return input_output
5521    *
5522    * To prevent infinite loops, there is an implicit execution timeout
5523    * associated with each loop ("loop timeout duration"). See {@link
5524    * ANeuralNetworksExecution_setLoopTimeout}.
5525    *
5526    * Inputs:
5527    * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition
5528    *      model. The model must have (m + k + n) inputs with
5529    *      the same types, ranks (if specified), dimensions (if specified),
5530    *      scales, zeroPoints, and other operand parameters as the
5531    *      corresponding inputs of the WHILE operation and exactly one output
5532    *      of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].
5533    *      The output operand must have fully specified dimensions.
5534    * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.
5535    *      The model must have (m + k + n) inputs and (m + k) outputs with
5536    *      the same types, ranks (if specified), dimensions (if specified),
5537    *      scales, zeroPoints, and other operand parameters as the
5538    *      corresponding inputs and outputs of the WHILE operation.
5539    * * (m inputs): Initial values for input-output operands.
5540    * * (k inputs): Initial values for state-only operands.
5541    * * (n inputs): Values for input-only operands.
5542    *
5543    * Outputs:
5544    * * 0 ~ (m - 1): Outputs produced by the loop.
5545    *
5546    * Available since NNAPI feature level 4.
5547    */
5548   ANEURALNETWORKS_WHILE = 97,
5549 
5550   /**
5551    * Computes exponential linear activation on the input tensor element-wise.
5552    *
5553    * The output is calculated using the following formula:
5554    *
5555    *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5556    *
5557    * Supported tensor {@link OperandCode}:
5558    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5559    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5560    *
5561    * Supported tensor rank: from 1.
5562    *
5563    * Inputs:
5564    * * 0: A tensor, specifying the input. May be zero-sized.
5565    * * 1: A scalar, specifying the alpha parameter.
5566    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5567    *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
5568    *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5569    *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
5570    *
5571    * Outputs:
5572    * * 0: The output tensor of same shape and type as input0.
5573    *
5574    * Available since NNAPI feature level 4.
5575    */
5576   ANEURALNETWORKS_ELU = 98,
5577 
5578   /**
5579    * Computes hard-swish activation on the input tensor element-wise.
5580    *
5581    * Hard swish activation is introduced in
5582    * https://arxiv.org/pdf/1905.02244.pdf
5583    *
5584    * The output is calculated using the following formula:
5585    *
5586    *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5587    *
5588    * Supported tensor {@link OperandCode}:
5589    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5590    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5591    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5592    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5593    *
5594    * Supported tensor rank: from 1.
5595    *
5596    * Inputs:
5597    * * 0: A tensor, specifying the input. May be zero-sized.
5598    *
5599    * Outputs:
5600    * * 0: The output tensor of same shape and type as input0.
5601    *      Scale and zero point of this tensor may be different from the input
5602    *      tensor's parameters.
5603    *
5604    * Available since NNAPI feature level 4.
5605    */
5606   ANEURALNETWORKS_HARD_SWISH = 99,
5607 
5608   /**
5609    * Creates a tensor filled with a scalar value.
5610    *
5611    * Supported output tensor {@link OperandCode}:
5612    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5613    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5614    * * {@link ANEURALNETWORKS_TENSOR_INT32}
5615    *
5616    * Supported tensor rank: from 1.
5617    *
5618    * Inputs:
5619    * * 0: A 1-D tensor, specifying the desired output tensor shape.
5620    * * 1: A scalar, specifying the value to fill the output tensors with.
5621    *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5622    *      the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
5623    *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5624    *      the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.
5625    *      For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
5626    *      the scalar must be of {@link ANEURALNETWORKS_INT32}.
5627    *
5628    * Outputs:
5629    * * 0: The output tensor.
5630    *
5631    * Available since NNAPI feature level 4.
5632    */
5633   ANEURALNETWORKS_FILL = 100,
5634 
5635   /**
5636    * Returns the rank of a tensor.
5637    *
5638    * The rank of a tensor is the number of dimensions in it. Also known as
5639    * "order", "degree", "ndims".
5640    *
5641    * Supported tensor {@link OperandCode}:
5642    * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5643    * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5644    * * {@link ANEURALNETWORKS_TENSOR_INT32}
5645    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5646    * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5647    * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
5648    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
5649    * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
5650    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5651    * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5652    *
5653    * Supported tensor rank: from 1.
5654    *
5655    * Inputs:
5656    * * 0: The input tensor.
5657    *
5658    * Outputs:
5659    * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank
5660    *      of the input tensor.
5661    *
5662    * Available since NNAPI feature level 4.
5663    */
5664   ANEURALNETWORKS_RANK = 101,
5665 } OperationCode;
5666 
5667 /**
5668  * Fused activation function types.
5669  *
5670  * Available since NNAPI feature level 1.
5671  */
5672 typedef enum {
5673   /** No fused activation function; the operation output is passed through unchanged. */
5674   ANEURALNETWORKS_FUSED_NONE = 0,
5675   /** Fused ReLU activation function: out = max(0, x). */
5676   ANEURALNETWORKS_FUSED_RELU = 1,
5677   /** Fused ReLU1 activation function: out = min(1, max(-1, x)). */
5678   ANEURALNETWORKS_FUSED_RELU1 = 2,
5679   /** Fused ReLU6 activation function: out = min(6, max(0, x)). */
5680   ANEURALNETWORKS_FUSED_RELU6 = 3,
5681 } FuseCode;
5682 
5683 /**
5684  * Implicit padding algorithms.
5685  *
5686  * Used by operations that take an implicit padding scheme operand.
5687  * Available since NNAPI feature level 1.
5688  */
5689 typedef enum {
5690   /**
5691    * SAME padding.
5692    * Padding on both ends are the "same":
5693    *     padding_to_beginning =  total_padding / 2
5694    *     padding_to_end       = (total_padding + 1)/2.
5695    * i.e., for even number of padding, padding to both ends are exactly
5696    * the same; for odd number of padding, padding to the ending is bigger
5697    * than the padding to the beginning by 1.
5698    *
5699    * total_padding is a function of input_size, stride, dilation and filter size.
5700    * It could be computed as follows:
5701    *    out_size = (input_size + stride - 1) / stride
5702    *    effective_filter_size = (filter_size - 1) * dilation + 1
5703    *    needed_input = (out_size - 1) * stride + effective_filter_size
5704    *    total_padding = max(0, needed_input - input_size)
5705    *  The computation is the same for the horizontal and vertical directions.
5706    */
5707   ANEURALNETWORKS_PADDING_SAME = 1,
5708 
5709   /**
5710    * VALID padding.
5711    * No padding. When the input size is not evenly divisible by
5712    * the filter size, the input at the end that could not fill
5713    * the whole filter tile will simply be ignored.
5714    */
5715   ANEURALNETWORKS_PADDING_VALID = 2,
5716 } PaddingCode;
5717 
5718 /**
5719  * Execution preferences.
5720  * Passed to {@link ANeuralNetworksCompilation_setPreference} to hint how a compiled model will be used.
5721  * Available since NNAPI feature level 1.
5722  */
5723 typedef enum {
5724   /**
5725    * Prefer executing in a way that minimizes battery drain.
5726    * This is desirable for compilations that will be executed often.
5727    */
5728   ANEURALNETWORKS_PREFER_LOW_POWER = 0,
5729   /**
5730    * Prefer returning a single answer as fast as possible, even if this causes
5731    * more power consumption. Typical for latency-sensitive, interactive use.
5732    */
5733   ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
5734   /**
5735    * Prefer maximizing the throughput of successive frames, for example when
5736    * processing successive frames coming from the camera.
5737    */
5738   ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
5739 } PreferenceCode;
5740 
5741 /**
5742  * Device types.
5743  *
5744  * The type of NNAPI device, as reported by {@link ANeuralNetworksDevice_getType}.
5745  */
5746 typedef enum {
5747   /** The device type cannot be provided. */
5748   ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
5749   /** The device does not fall into any category below. */
5750   ANEURALNETWORKS_DEVICE_OTHER = 1,
5751   /** The device runs NNAPI models on single or multi-core CPU. */
5752   ANEURALNETWORKS_DEVICE_CPU = 2,
5753   /** The device can run NNAPI models and also accelerate graphics APIs such
5754    * as OpenGL ES and Vulkan. */
5755   ANEURALNETWORKS_DEVICE_GPU = 3,
5756   /** Dedicated accelerator for Machine Learning workloads. */
5757   ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
5758 } DeviceTypeCode;
5759 
5760 /**
5761  * NNAPI feature levels.
5762  *
5763  * Each update of the NNAPI specification yields a new NNAPI feature level enum value.
5764  * NNAPI feature level corresponds to an NNAPI specification version that a driver
5765  * and/or the NNAPI runtime can implement.
5766  *
5767  * A feature level up to and including "FEATURE_LEVEL_5" maps directly to
5768  * the Android API level that introduced the corresponding update of the NNAPI
5769  * specification. Feature levels after Android API level 31 have no association with
5770  * API level because the NNAPI specification can be updated between Android API
5771  * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and
5772  * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against
5773  * these enum values instead of the Android API level.
5774  */
5775 typedef enum {
5776   /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */
5777   ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
5778   /** NNAPI specification available in Android P, Android NNAPI feature level 2 */
5779   ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
5780   /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */
5781   ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
5782   /** NNAPI specification available in Android R, Android NNAPI feature level 4 */
5783   ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
5784   /**
5785    * NNAPI specification available in Android S, Android NNAPI feature level 5.
5786    * After Android S, the NNAPI specification can be updated between Android
5787    * API releases.
5788    */
5789   ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
5790 } FeatureLevelCode;
5791 
/**
 * Result codes.
 *
 * <p>Any NNAPI function can return any result code, including result codes not
 * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
 * indicates a failure of some kind.</p>
 *
 * <p>Additional information about the nature of a failure can be obtained from
 * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
 * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
  /**
   * Operation was successful.
   */
  ANEURALNETWORKS_NO_ERROR = 0,

  /**
   * Failure caused by not enough available memory.
   */
  ANEURALNETWORKS_OUT_OF_MEMORY = 1,

  /*
   * NOTE(review): this value is left undocumented in the upstream NNAPI
   * header as well; it presumably signals an operation that did not run to
   * completion — confirm exact semantics against the NNAPI runtime sources.
   */
  ANEURALNETWORKS_INCOMPLETE = 2,

  /**
   * Failure caused by unexpected null argument.
   */
  ANEURALNETWORKS_UNEXPECTED_NULL = 3,

  /**
   * Failure caused by invalid function arguments, invalid model definition,
   * invalid execution definition or invalid data at execution time.
   */
  ANEURALNETWORKS_BAD_DATA = 4,

  /**
   * Failure caused by failed model execution.
   */
  ANEURALNETWORKS_OP_FAILED = 5,

  /**
   * Failure caused by object being in the wrong state.
   */
  ANEURALNETWORKS_BAD_STATE = 6,

  /**
   * Failure caused by not being able to map a file into memory.
   * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
   * not supported by the device.
   * Mitigate by reading its content into memory.
   */
  ANEURALNETWORKS_UNMAPPABLE = 7,

  /**
   * Failure caused by insufficient buffer size provided to a model output.
   */
  ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,

  /**
   * Failure caused by a device not being available.
   */
  ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,

  /**
   * Failure because a deadline could not be met for a task, but future
   * deadlines may still be met for the same task after a short delay.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,

  /**
   * Failure because a deadline could not be met for a task, and future
   * deadlines will likely also not be met for the same task even after a
   * short delay.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,

  /**
   * Failure because of a resource limitation within the driver, but future
   * calls for the same task may still succeed after a short delay.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,

  /**
   * Failure because of a resource limitation within the driver, and future
   * calls for the same task will likely also fail even after a short
   * delay.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,

  /**
   * Failure indicating an object is in a dead state.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_DEAD_OBJECT = 14,
} ResultCode;
5898 
/**
 * For {@link ANeuralNetworksModel_setOperandValue}, values with a
 * length smaller or equal to this will be immediately copied into
 * the model. The size is in bytes.
 *
 * Available since NNAPI feature level 1.
 */
enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };
5907 
/**
 * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
 * of the cache token required from the application. The size is in bytes.
 *
 * Available since NNAPI feature level 3.
 */
enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };
5915 
/**
 * Different duration measurements.
 *
 * Durations are measured in nanoseconds.
 *
 * Available since NNAPI feature level 3.
 */
typedef enum {
  /** Execution time on hardware (not driver, which runs on host processor). */
  ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
  /**
   * Execution time in driver (including time on hardware).  Excludes overhead
   * such as that of the runtime itself and the IPC needed for the runtime to
   * communicate with the driver.
   */
  ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
  /**
   * Execution time on hardware, after all dependencies have been signaled.
   * If no dependencies specified (for example, if the execution was scheduled other
   * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
   * reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
  /**
   * Execution time in driver, after all dependencies have been signaled. Excludes
   * overhead such as that of the runtime itself and the IPC needed for the runtime
   * to communicate with the driver.
   * If no dependencies specified (for example, if the execution was scheduled other
   * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
   * reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
   *
   * Available since NNAPI feature level 4.
   */
  ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;
5945 
/**
 * Relative execution priority.
 *
 * Available since NNAPI feature level 4.
 */
typedef enum {
  /** Lower priority relative to other executions from the same application. */
  ANEURALNETWORKS_PRIORITY_LOW = 90,
  /** Medium priority; also the default (see ANEURALNETWORKS_PRIORITY_DEFAULT). */
  ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
  /** Higher priority relative to other executions from the same application. */
  ANEURALNETWORKS_PRIORITY_HIGH = 110,
  /** The priority used when none is explicitly set; equal to MEDIUM. */
  ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
} PriorityCode;
5957 
/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every constant tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
 * create shared memory from a file handle.
 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
 * create shared memory from an AHardwareBuffer handle.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *
 * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
 * {@link ANeuralNetworksExecution_setInputFromMemory} and
 * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
 * memory object must be aligned on a boundary of a byte size that is a multiple
 * of the element type byte size, e.g., a tensor with
 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
 *
 * It is the application's responsibility to ensure that there are no uses of
 * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
 * any model which references this memory because of a call to
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
 * created using such a model, any execution object or burst object created
 * using such a compilation, or any execution which references this memory
 * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
 * {@link ANeuralNetworksExecution_setOutputFromMemory}.
 *
 * Available since NNAPI feature level 1.
 *
 * Starting at NNAPI feature level 4, the application may request creation of device native memory
 * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation
 * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and
 * {@link ANeuralNetworksMemory_createFromDesc}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
6001 
/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>Build the model by calling<ul>
 * <li>{@link ANeuralNetworksModel_create}</li>
 * <li>{@link ANeuralNetworksModel_addOperation}</li>
 * <li>{@link ANeuralNetworksModel_addOperand}</li>
 * </ul>
 *
 * This forms a graph in which each operation and operand is a node, a
 * directed edge from an operand to an operation indicates that the
 * operand is an input to the operation, and a directed edge from an
 * operation to an operand indicates that the operand is an output
 * from the operation. This graph must be acyclic.
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
 * has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}.
 * This includes any compilation, execution object or burst object created using
 * the model.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
6036 
/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 *    <li>Create a new compilation instance by calling the
 *        {@link ANeuralNetworksCompilation_create} function or
 *        {@link ANeuralNetworksCompilation_createForDevices}.</li>
 *    <li>Set any desired properties on the compilation (for example,
 *        {@link ANeuralNetworksCompilation_setPreference}).</li>
 *    <li>Optionally, set the caching signature and the cache directory on the
 *        compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li>
 *    <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
 *    <li>Use the compilation as many times as needed
 *        with {@link ANeuralNetworksExecution_create} and
 *        {@link ANeuralNetworksBurst_create}.</li>
 *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
 *        once all executions using the compilation have completed.</li></ul></p>
 *
 * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
 * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
 *
 * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
 * has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only
 * one thread modifies a compilation at a given time. It is however
 * safe for more than one thread to use the compilation once
 * {@link ANeuralNetworksCompilation_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no other
 * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
 * This includes any execution object or burst object created using the compilation,
 * or any memory descriptor with the compilation as part of one of the roles specified by
 * {@link ANeuralNetworksMemoryDesc_addInputRole} or
 * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
6077 
/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
 * learning model to a set of inputs.
 *
 * <p>To use:<ul>
 *    <li>Create a new execution instance by calling the
 *        {@link ANeuralNetworksExecution_create} function.</li>
 *    <li>Associate input buffers or memory regions to the model inputs with
 *        {@link ANeuralNetworksExecution_setInput} or
 *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 *    <li>Associate output buffers or memory regions to the model outputs with
 *        {@link ANeuralNetworksExecution_setOutput} or
 *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 *    <li>Optionally, configure the execution with
 *        {@link ANeuralNetworksExecution_setLoopTimeout},
 *        {@link ANeuralNetworksExecution_setMeasureTiming},
 *        {@link ANeuralNetworksExecution_setReusable}, or
 *        {@link ANeuralNetworksExecution_setTimeout}.
 *    <li>Apply the model with one of the following:</li><ul>
 *        <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
 *            or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
 *            waiting for the execution to complete with
 *            {@link ANeuralNetworksEvent_wait}.</li>
 *        <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
 *        <li>Synchronously as part of an execution burst with
 *            {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
 *        If the execution has been marked as reusable, then you can
 *        apply the model more than once.
 *    <li>Destroy the execution with
 *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An output buffer or memory region must not overlap with any
 * other output buffer or memory region, with an input buffer or
 * memory region, or with an operand value in a memory object
 * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
 *
 * <p>An execution is in the preparation state after it is created by
 * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation
 * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute},
 * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of
 * the execution object to the computation state. When the computation completes, the state of
 * the execution object will change from the computation state to the completed state.
 * The computation is completed when {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait}
 * has returned.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute},
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>Starting at NNAPI feature level 5, the application may call
 * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple
 * computations. The application may schedule and evaluate a computation again from the completed
 * state of a reusable execution. The execution cannot be modified between computations.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that the execution
 * either has never been scheduled or has completed (i.e., that
 * {@link ANeuralNetworksExecution_burstCompute},
 * {@link ANeuralNetworksExecution_compute}, or
 * {@link ANeuralNetworksEvent_wait} has returned) before calling
 * {@link ANeuralNetworksExecution_free}.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no other
 * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
 *
 * <p>It is the application's responsibility to ensure that there are no concurrent computations
 * scheduled and evaluated on the same execution, either by means of
 * {@link ANeuralNetworksExecution_compute} or
 * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous)
 * in different threads, or by means of
 * {@link ANeuralNetworksExecution_startCompute} or
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
 * It is however safe to schedule and evaluate multiple computations on different executions
 * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
 * different burst objects.) The runtime makes no guarantee on the ordering of
 * completion of executions. If it's important to the application, the
 * application should enforce the ordering by ensuring that one execution
 * completes before the next is scheduled (for example, by scheduling all
 * executions synchronously within a single thread, or by scheduling all
 * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
 * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
 * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
 * list of events to be signaled before starting the actual evaluation.</p>
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
6173 
/**
 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
 */
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
  /** The index of the channel dimension. */
  uint32_t channelDim;
  /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */
  uint32_t scaleCount;
  /** The array of scaling values for each channel. Each value must be greater than zero. */
  const float *scales;
} ANeuralNetworksSymmPerChannelQuantParams;
6185 
/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
 * of a rapid sequence of executions. It will likely cause overhead if only used
 * for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of inferences
 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
 * object and the {@link ANeuralNetworksExecution} objects used with it must all
 * have been created from the same {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may choose
 * to increase the clock frequency of its accelerator for the lifetime of a
 * burst object.
 *
 * <p>To use:<ul>
 *    <li>Create a new burst object by calling the
 *        {@link ANeuralNetworksBurst_create} function.</li>
 *    <li>For each execution:</li><ul>
 *        <li>Create {@link ANeuralNetworksExecution} and configure its
 *            properties (see {@link ANeuralNetworksExecution} for details).</li>
 *        <li>Apply the model synchronously with
 *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *            {@link ANeuralNetworksBurst} with the new
 *            {@link ANeuralNetworksExecution}.</li>
 *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 *    <li>Destroy the burst with
 *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
6218 
/**
 * ANeuralNetworksOperandType describes the type of an operand.
 *
 * This structure is used to describe both scalars and tensors.
 *
 * A tensor operand type with all dimensions specified is "fully
 * specified".  Whenever possible (i.e., whenever the dimensions are
 * known at model construction time), a tensor operand type should be
 * (but is not required to be) fully specified, in order to enable the
 * best possible performance.
 *
 * If a tensor operand's type is not fully specified, the dimensions
 * of the operand are deduced from the operand types and values of the
 * operation for which that operand is an output or from the corresponding
 * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
 * operand type in the case of referenced model input operands.
 *
 * <p>In the following situations, a tensor operand type must be fully
 * specified:<ul>
 *     <li>The operand has a constant value, set by
 *         {@link ANeuralNetworksModel_setOperandValue} (with a
 *         non-nullptr buffer) or
 *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
 *     <li>The operand is a model input (see
 *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *         model within a compilation.  A fully specified tensor operand type
 *         must either be provided to {@link ANeuralNetworksModel_addOperand};
 *         or it must be provided to the corresponding
 *         {@link ANeuralNetworksExecution_setInput}, or
 *         {@link ANeuralNetworksExecution_setInputFromMemory}.
 *         EXCEPTION: If the input is optional and omitted
 *         (by passing nullptr for buffer to
 *         {@link ANeuralNetworksExecution_setInput}) then it need
 *         not have a fully specified tensor operand type.</li>
 *     <li>The operand is a model output (see
 *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *         model within a compilation and is to be used with {@link
 *         ANeuralNetworksExecution_startComputeWithDependencies}.
 *         A fully specified tensor operand type must either be provided
 *         to {@link ANeuralNetworksModel_addOperand}; or it must be
 *         provided to the corresponding
 *         {@link ANeuralNetworksExecution_setOutput}, or
 *         {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
 *
 * A tensor operand type of specified rank but some number of
 * unspecified dimensions is represented by setting dimensionCount to
 * the rank and each unspecified dimension to 0.
 *
 * Available since NNAPI feature level 1.
 *
 * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is
 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
 * it were a scalar operand type).
 */
typedef struct ANeuralNetworksOperandType {
  /**
   * The data type, e.g. ANEURALNETWORKS_FLOAT32.
   */
  int32_t type;

  /**
   * The number of dimensions (rank).
   *
   * Must be 0 for scalars.
   */
  uint32_t dimensionCount;

  /**
   * The dimensions of the tensor.
   *
   * Must be nullptr for scalars.
   */
  const uint32_t *dimensions;

  /**
   * The quantization scale.
   *
   * Must be 0 when not applicable to an operand type.
   *
   * See {@link OperandCode}.
   */
  float scale;

  /**
   * The quantization zero point.
   *
   * Must be 0 when not applicable to an operand type.
   *
   * See {@link OperandCode}.
   */
  int32_t zeroPoint;
} ANeuralNetworksOperandType;
6311 
/**
 * Aliasing to {@link OperationCode}, used in function
 * {@link ANeuralNetworksModel_addOperation}.
 */
typedef int32_t ANeuralNetworksOperationType;
6317 
/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
6325 
/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the corresponding
 * device, and control which device(s) a model is to be run on.
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
6335 
/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is used by
 * {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc}.
 *   - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}.
 * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish}
 * has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no other
 * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}.
 * It is however safe to continue using a {@link ANeuralNetworksMemory} object created
 * from the memory descriptor.
 *
 * Available since NNAPI feature level 4.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
6373 
6374 __END_DECLS
6375 
6376 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
6377 
6378 /** @} */
6379