• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * @addtogroup NeuralNetworks
19  * @{
20  */
21 
22 /**
23  * @file NeuralNetworksTypes.h
24  */
25 
26 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H
27 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H
28 
29 /******************************************************************
30  *
31  * IMPORTANT NOTICE:
32  *
33  *   This file is part of Android's set of stable system headers
34  *   exposed by the Android NDK (Native Development Kit).
35  *
36  *   Third-party source AND binary code relies on the definitions
37  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
38  *
39  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
40  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
41  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
42  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
43  */
44 
45 #include <stdbool.h>
46 #include <stddef.h>
47 #include <stdint.h>
48 #include <sys/cdefs.h>
49 
50 #ifdef __ANDROID__
51 #include <android/hardware_buffer.h>
52 #endif  // __ANDROID__
53 
54 __BEGIN_DECLS
55 
56 /**
57  * Operand types.
58  *
59  * The type of an operand in a model.
60  *
61  * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors
62  * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent
63  * scalar values and must have no dimensions.
64  *
65  * Although we define many types, most operators accept just a few
66  * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
67  * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
68  * and {@link ANEURALNETWORKS_INT32}.
69  *
70  * Available since NNAPI feature level 1.
71  */
72 typedef enum {
    /* NOTE: This enum is part of Android's frozen NDK ABI (see the notice at
     * the top of this file). Existing enumerator values must never be changed
     * or removed; only new 32-bit values may be appended. */
    /* Values 0-5 below are available since NNAPI feature level 1 (see the
     * enum's Doxygen header); later values carry their own "Available since"
     * notes. */
73     /** A 32 bit floating point scalar value. */
74     ANEURALNETWORKS_FLOAT32 = 0,
75     /** A signed 32 bit integer scalar value. */
76     ANEURALNETWORKS_INT32 = 1,
77     /** An unsigned 32 bit integer scalar value. */
78     ANEURALNETWORKS_UINT32 = 2,
79     /** A tensor of 32 bit floating point values. */
80     ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
81     /** A tensor of 32 bit integer values. */
82     ANEURALNETWORKS_TENSOR_INT32 = 4,
83     /**
84      * A tensor of 8 bit unsigned integers that represent real numbers.
85      *
86      * Attached to this tensor are two numbers that can be used to convert the
87      * 8 bit integer to the real value and vice versa. These two numbers are:
88      * - scale: a 32 bit floating point value greater than zero.
89      * - zeroPoint: a 32 bit integer, in range [0, 255].
90      *
91      * The formula is:
92      *   real_value = (integer_value - zeroPoint) * scale.
93      */
94     ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
95     /**
96      * An 8 bit boolean scalar value.
97      *
98      * Values of this operand type are either true or false. A zero value
99      * represents false; any other value represents true.
100      *
101      * Available since NNAPI feature level 3.
102      */
103     ANEURALNETWORKS_BOOL = 6,
104     /**
105      * A tensor of 16 bit signed integers that represent real numbers.
106      *
107      * Attached to this tensor is a number representing real value scale that is
108      * used to convert the 16 bit number to a real value in the following way:
109      * realValue = integerValue * scale.
110      *
111      * scale is a 32 bit floating point with value greater than zero.
112      *
113      * Available since NNAPI feature level 3.
114      */
115     ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
116     /**
117      * A tensor of IEEE 754 16 bit floating point values.
118      *
119      * Available since NNAPI feature level 3.
120      */
121     ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
122     /**
123      * A tensor of 8 bit boolean values.
124      *
125      * Values of this operand type are either true or false. A zero value
126      * represents false; any other value represents true.
127      *
128      * Available since NNAPI feature level 3.
129      */
130     ANEURALNETWORKS_TENSOR_BOOL8 = 9,
131     /**
132      * An IEEE 754 16 bit floating point scalar value.
133      *
134      * Available since NNAPI feature level 3.
135      */
136     ANEURALNETWORKS_FLOAT16 = 10,
137     /**
138      * A tensor of 8 bit signed integers that represent real numbers.
139      *
140      * This tensor is associated with additional fields that can
141      * be used to convert the 8 bit signed integer to the real value and vice versa.
142      * These fields are:
143      * - channelDim: a 32 bit unsigned integer indicating channel dimension.
144      * - scales: an array of positive 32 bit floating point values.
145      * The size of the scales array must be equal to dimensions[channelDim].
146      *
147      * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
148      * to set the parameters for an Operand of this type.
149      *
150      * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
151      *
152      * The formula is:
153      * realValue[..., C, ...] =
154      *     integerValue[..., C, ...] * scales[C]
155      * where C is an index in the Channel dimension.
156      *
157      * Available since NNAPI feature level 3.
158      */
159     ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
160     /**
161      * A tensor of 16 bit unsigned integers that represent real numbers.
162      *
163      * Attached to this tensor are two numbers that can be used to convert the
164      * 16 bit integer to the real value and vice versa. These two numbers are:
165      * - scale: a 32 bit floating point value greater than zero.
166      * - zeroPoint: a 32 bit integer, in range [0, 65535].
167      *
168      * The formula is:
169      * real_value = (integer_value - zeroPoint) * scale.
170      *
171      * Available since NNAPI feature level 3.
172      */
173     ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
174     /**
175      * A tensor of 8 bit signed integers that represent real numbers.
176      *
177      * Attached to this tensor is a number representing real value scale that is
178      * used to convert the 8 bit number to a real value in the following way:
179      * realValue = integerValue * scale.
180      *
181      * scale is a 32 bit floating point with value greater than zero.
182      *
183      * Available since NNAPI feature level 3.
184      */
185     ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
186     /**
187      * A tensor of 8 bit signed integers that represent real numbers.
188      *
189      * Attached to this tensor are two numbers that can be used to convert the
190      * 8 bit integer to the real value and vice versa. These two numbers are:
191      * - scale: a 32 bit floating point value greater than zero.
192      * - zeroPoint: a 32 bit integer, in range [-128, 127].
193      *
194      * The formula is:
195      * real_value = (integer_value - zeroPoint) * scale.
196      *
197      * Available since NNAPI feature level 4.
198      */
199     ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,
200     /**
201      * A reference to a model.
202      *
203      * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
204      * the value for an Operand of this type.
205      *
206      * Available since NNAPI feature level 4.
207      */
208     ANEURALNETWORKS_MODEL = 15,
209 } OperandCode;
210 
211 /**
212  * Operation types.
213  *
214  * The type of an operation in a model.
215  *
216  * Available since NNAPI feature level 1.
217  */
218 typedef enum {
219     // Operations below are available since NNAPI feature level 1.
220 
221     /**
222      * Adds two tensors, element-wise.
223      *
224      * Takes two input tensors of identical {@link OperandCode} and compatible
225      * dimensions. The output is the sum of both input tensors, optionally
226      * modified by an activation function.
227      *
228      * Two dimensions are compatible when:
229      *     1. they are equal, or
230      *     2. one of them is 1
231      *
232      * The size of the output is the maximum size along each dimension of the
233      * input operands. It starts with the trailing dimensions, and works its
234      * way forward.
235      *
236      * Example:
237      *
238      *     input1.dimension = {4, 1, 2}
239      *     input2.dimension = {5, 4, 3, 1}
240      *     output.dimension = {5, 4, 3, 2}
241      *
242      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
243      * dimension is only compatible with 0 or 1. The size of the output
244      * dimension is zero if either of corresponding input dimension is zero.
245      *
246      * Supported tensor {@link OperandCode}:
247      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
248      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
249      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
250      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
251      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
252      *
253      * Supported tensor rank: up to 4
254      *
255      * Inputs:
256      * * 0: A tensor.
257      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
258      *      as input0.
259      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
260      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
261      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
262      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
263      *      {@link FuseCode} values. Specifies the activation to
264      *      invoke on the result.
265      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
266      *      the {@link FuseCode} must be "NONE".
267      *
268      * Outputs:
269      * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
270      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
271      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
272      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
273      *
274      * Available since NNAPI feature level 1.
275      */
276     ANEURALNETWORKS_ADD = 0,
277 
278     /**
279      * Performs a 2-D average pooling operation.
280      *
281      * The output dimensions are functions of the filter dimensions, stride, and
282      * padding.
283      *
284      * The values in the output tensor are computed as:
285      *
286      *     output[b, i, j, channel] =
287      *         sum_{di, dj}(
288      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
289      *         ) / sum(1)
290      *
291      * Supported tensor {@link OperandCode}:
292      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
293      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
294      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
295      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
296      *
297      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
298      * With the default data layout NHWC, the data is stored in the order of:
299      * [batch, height, width, channels]. Alternatively, the data layout could
300      * be NCHW, the data storage order of: [batch, channels, height, width].
301      * NCHW is supported since NNAPI feature level 3.
302      *
303      * Both explicit padding and implicit padding are supported.
304      *
305      * Inputs (explicit padding):
306      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
307      *      the input.
308      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
309      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
310      *      the left, in the ‘width’ dimension.
311      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
312      *      the right, in the ‘width’ dimension.
313      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
314      *      the top, in the ‘height’ dimension.
315      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
316      *      the bottom, in the ‘height’ dimension.
317      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
318      *      walking through input in the ‘width’ dimension.
319      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
320      *      walking through input in the ‘height’ dimension.
321      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
322      *      width.
323      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
324      *      height.
325      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
326      *      {@link FuseCode} values. Specifies the activation to
327      *      invoke on the result.
328      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
329      *       Set to true to specify NCHW data layout for input0 and output0.
330      *       Available since NNAPI feature level 3.
331      *
332      * Inputs (implicit padding):
333      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
334      *      the input.
335      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
336      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
337      *      padding scheme, has to be one of the
338      *      {@link PaddingCode} values.
339      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
340      *      walking through input in the ‘width’ dimension.
341      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
342      *      walking through input in the ‘height’ dimension.
343      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
344      *      width.
345      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
346      *      height.
347      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
348      *      {@link FuseCode} values. Specifies the activation to
349      *      invoke on the result.
350      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
351      *      Set to true to specify NCHW data layout for input0 and output0.
352      *      Available since NNAPI feature level 3.
353      *
354      * Outputs:
355      * * 0: The output 4-D tensor, of shape
356      *      [batches, out_height, out_width, depth].
357      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
358      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
359      *      the scale and zeroPoint must be the same as input0.
360      *
361      * Available since NNAPI feature level 1.
362      */
363     ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
364 
365     /**
366      * Concatenates the input tensors along the given dimension.
367      *
368      * The input tensors must have identical {@link OperandCode} and the same
369      * dimensions except the dimension along the concatenation axis.
370      *
371      * Supported tensor {@link OperandCode}:
372      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
373      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
374      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
375      *   (full support since NNAPI feature level 3, see the input section)
376      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
377      *
378      * Supported tensor rank: up to 4
379      *
380      * Inputs:
381      * * 0 ~ n-1: The list of n input tensors, of shape
382      *            [D0, D1, ..., Daxis(i), ..., Dm].
383      *            Before NNAPI feature level 3, all input tensors of
384      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
385      *            must have the same scale and zeroPoint as the output tensor.
386      *            Input tensors of
387      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
388      *            are allowed to have different scale and zeroPoint.
389      *            Since NNAPI feature level 3, zero-sized tensors are supported.
390      * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
391      *      concatenation axis.
392      *
393      * Outputs:
394      * * 0: The output, a tensor of the same {@link OperandCode} as the input
395      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
396      *      Since NNAPI feature level 3, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
397      *      the scale and zeroPoint values can be different from
398      *      input tensors. Before NNAPI feature level 3 they have to be the same as for the
399      *      input tensors.
400      *
401      * Available since NNAPI feature level 1.
402      */
403     ANEURALNETWORKS_CONCATENATION = 2,
404 
405     /**
406      * Performs a 2-D convolution operation.
407      *
408      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
409      * batch of images, applying the filter to each window of each image of the
410      * appropriate size.
411      *
412      * The output dimensions are functions of the filter dimensions, stride, and
413      * padding.
414      *
415      * The values in the output tensor are computed as:
416      *
417      *     output[b, i, j, channel] =
418      *         sum_{di, dj, k} (
419      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
420      *             filter[channel, di, dj, k]
421      *         ) + bias[channel]
422      *
423      * Supported tensor {@link OperandCode} configurations:
424      * * 32 bit floating point:
425      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
426      *
427      * * Quantized:
428      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
429      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
430      * * * input.scale * filter.scale).
431      *
432      * Available since NNAPI feature level 3:
433      * * 16 bit floating point:
434      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
435      *
436      * * Quantized with symmetric per channel quantization for the filter:
437      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
438      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
439      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
440      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
441      *
442      * Available since NNAPI feature level 4:
443      * * Quantized signed (since NNAPI feature level 4):
444      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
445      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
446      * * * input.scale * filter.scale).
447      *
448      * * Quantized signed with filter symmetric per channel quantization
449      *   (since NNAPI feature level 4):
450      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
451      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
452      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
453      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
454      *
455      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
456      * With the default data layout NHWC, the data is stored in the order of:
457      * [batch, height, width, channels]. Alternatively, the data layout could
458      * be NCHW, the data storage order of: [batch, channels, height, width].
459      * NCHW is supported since NNAPI feature level 3.
460      *
461      * Both explicit padding and implicit padding are supported.
462      *
463      * Inputs (explicit padding):
464      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
465      *      specifying the input.
466      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
467      * * 1: A 4-D tensor, of shape
468      *      [depth_out, filter_height, filter_width, depth_in], specifying the
469      *      filter.
470      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
471      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
472      *      must be set to 0.
473      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
474      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
475      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
476      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
477      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
478      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
479      *      of 0 and bias_scale == input_scale * filter_scale.
480      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
481      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
482      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
483      *      bias_scale[i] = input_scale * filter_scale[i].
484      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
485      *      the left, in the ‘width’ dimension.
486      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
487      *      the right, in the ‘width’ dimension.
488      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
489      *      the top, in the ‘height’ dimension.
490      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
491      *      the bottom, in the ‘height’ dimension.
492      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
493      *      walking through input in the ‘width’ dimension.
494      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
495      *      walking through input in the ‘height’ dimension.
496      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
497      *      {@link FuseCode} values. Specifies the activation to
498      *      invoke on the result.
499      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
500      *      Set to true to specify NCHW data layout for input0 and output0.
501      *      Available since NNAPI feature level 3.
502      * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
503      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
504      *      cells between each filter element on width dimension. If this input is set,
505      *      input 12 (dilation factor for height) must be specified as well.
506      *      Available since NNAPI feature level 3.
507      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
508      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
509      *      cells between each filter element on height dimension. If this input is set,
510      *      input 11 (dilation factor for width) must be specified as well.
511      *      Available since NNAPI feature level 3.
512      *
513      * Inputs (implicit padding):
514      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
515      *      specifying the input.
516      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
517      * * 1: A 4-D tensor, of shape
518      *      [depth_out, filter_height, filter_width, depth_in], specifying the
519      *      filter.
520      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
521      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
522      *      must be set to 0.
523      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
524      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
525      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same
526      *      type.
527      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
528      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
529      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
530      *      of 0 and bias_scale == input_scale * filter_scale.
531      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
532      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
533      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
534      *      bias_scale[i] = input_scale * filter_scale[i].
535      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
536      *      padding scheme, has to be one of the
537      *      {@link PaddingCode} values.
538      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
539      *      walking through input in the ‘width’ dimension.
540      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
541      *      walking through input in the ‘height’ dimension.
542      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
543      *      {@link FuseCode} values. Specifies the activation to
544      *      invoke on the result.
545      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
546      *      Set to true to specify NCHW data layout for input0 and output0.
547      *      Available since NNAPI feature level 3.
548      * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
549      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
550      *      cells between each filter element on width dimension. If this input is set,
551      *      input 9 (dilation factor for height) must be specified as well.
552      *      Available since NNAPI feature level 3.
553      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
554      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
555      *      cells between each filter element on height dimension. If this input is set,
556      *      input 8 (dilation factor for width) must be specified as well.
557      *      Available since NNAPI feature level 3.
558      *
559      * Outputs:
560      * * 0: The output 4-D tensor, of shape
561      *      [batches, out_height, out_width, depth_out].
562      *      Before NNAPI feature level 3, for output tensor of
563      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition must
564      *      be satisfied: output_scale > input_scale * filter_scale
565      *
566      * Available since NNAPI feature level 1.
567      */
568     ANEURALNETWORKS_CONV_2D = 3,
569 
570     /**
571      * Performs a depthwise 2-D convolution operation.
572      *
573      * Given an input tensor of shape [batches, height, width, depth_in] and a
574      * filter tensor of shape [1, filter_height, filter_width, depth_out]
575      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
576      * applies a different filter to each input channel (expanding from 1
577      * channel to channel_multiplier channels for each), then concatenates the
578      * results together.
579      *
580      * The output has depth_out = depth_in * depth_multiplier channels.
581      * The output dimensions are functions of the filter dimensions, stride, and
582      * padding.
583      *
584      * The values in the output tensor are computed as:
585      *
586      *     output[b, i, j, k * channel_multiplier + q] =
587      *         sum_{di, dj} (
588      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
589      *             filter[1, di, dj, k * channel_multiplier + q]
590      *         ) + bias[k * channel_multiplier + q]
591      *
592      * Supported tensor {@link OperandCode} configurations:
593      * * 32 bit floating point:
594      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
595      *
596      * * Quantized:
597      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
598      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
599      * * * input.scale * filter.scale).
600      *
601      * Available since NNAPI feature level 3:
602      * * 16 bit floating point:
603      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
604      *
605      * * Quantized with symmetric per channel quantization for the filter:
606      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
607      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
608      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
609      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
610      *
611      * Available since NNAPI feature level 4:
612      * * Quantized signed (since NNAPI feature level 4):
613      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
614      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
615      * * * input.scale * filter.scale).
616      *
617      * * Quantized signed with filter symmetric per channel quantization
618      *   (since NNAPI feature level 4):
619      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
620      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
621      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
622      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
623      *
624      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
625      * With the default data layout NHWC, the data is stored in the order of:
626      * [batch, height, width, channels]. Alternatively, the data layout could
627      * be NCHW, the data storage order of: [batch, channels, height, width].
628      * NCHW is supported since NNAPI feature level 3.
629      *
630      * Both explicit padding and implicit padding are supported.
631      *
632      * Inputs (explicit padding):
633      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
634      *      specifying the input.
635      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
636      *      specifying the filter.
637      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
638      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
639      *      must be set to 3.
640      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
641      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
642      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
643      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
644      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
645      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
646      *      of 0 and bias_scale == input_scale * filter_scale.
647      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
648      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
649      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
650      *      bias_scale[i] = input_scale * filter_scale[i].
651      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
652      *      the left, in the ‘width’ dimension.
653      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
654      *      the right, in the ‘width’ dimension.
655      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
656      *      the top, in the ‘height’ dimension.
657      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
658      *      the bottom, in the ‘height’ dimension.
659      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
660      *      walking through input in the ‘width’ dimension.
661      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
662      *      walking through input in the ‘height’ dimension.
663      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
664      *      multiplier.
665      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
666      *       {@link FuseCode} values. Specifies the activation to
667      *       invoke on the result.
668      * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
669      *       Set to true to specify NCHW data layout for input0 and output0.
670      *       Available since NNAPI feature level 3.
671      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
672      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
673      *      cells between each filter element on width dimension. If this input is set,
674      *      input 13 (dilation factor for height) must be specified as well.
675      *      Available since NNAPI feature level 3.
676      * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
677      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
678      *      cells between each filter element on height dimension. If this input is set,
679      *      input 12 (dilation factor for width) must be specified as well.
680      *      Available since NNAPI feature level 3.
681      *
682      * Inputs (implicit padding):
683      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
684      *      specifying the input.
685      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
686      *      specifying the filter.
687      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
688      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
689      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
690      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
691      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
692      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
693      *      of 0 and bias_scale == input_scale * filter_scale.
694      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
695      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
696      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
697      *      bias_scale[i] = input_scale * filter_scale[i].
698      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
699      *      padding scheme, has to be one of the
700      *      {@link PaddingCode} values.
701      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
702      *      walking through input in the ‘width’ dimension.
703      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
704      *      walking through input in the ‘height’ dimension.
705      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
706      *      multiplier.
707      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
708      *      {@link FuseCode} values. Specifies the activation to
709      *      invoke on the result.
710      * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
711      *      Set to true to specify NCHW data layout for input0 and output0.
712      *      Available since NNAPI feature level 3.
713      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
714      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
715      *      cells between each filter element on width dimension. If this input is set,
716      *      input 10 (dilation factor for height) must be specified as well.
717      *      Available since NNAPI feature level 3.
718      * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
719      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
720      *      cells between each filter element on height dimension. If this input is set,
721      *      input 9 (dilation factor for width) must be specified as well.
722      *      Available since NNAPI feature level 3.
723      *
724      * Outputs:
725      * * 0: The output 4-D tensor, of shape
726      *      [batches, out_height, out_width, depth_out]. Before NNAPI feature level 3, for
727      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
728      *      the following condition must be satisfied:
729      *      output_scale > input_scale * filter_scale
730      *
731      * Available since NNAPI feature level 1.
732      */
733     ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
734 
735     /**
736      * Rearranges data from depth into blocks of spatial data.
737      *
738      * More specifically, this op outputs a copy of the input tensor where
739      * values from the depth dimension are moved in spatial blocks to the height
740      * and width dimensions. The value block_size indicates the input block size
741      * and how the data is moved.
742      *
743      * Chunks of data of size block_size * block_size from depth are rearranged
744      * into non-overlapping blocks of size block_size x block_size.
745      *
746      * The width of the output tensor is input_width * block_size, whereas the
747      * height is input_height * block_size. The depth of the input tensor must
748      * be divisible by block_size * block_size.
749      *
750      * Supported tensor {@link OperandCode}:
751      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
752      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
753      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
754      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
755      *
756      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
757      * With the default data layout NHWC, the data is stored in the order of:
758      * [batch, height, width, channels]. Alternatively, the data layout could
759      * be NCHW, the data storage order of: [batch, channels, height, width].
760      * NCHW is supported since NNAPI feature level 3.
761      *
762      * Inputs:
763      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
764      *      specifying the input.
765      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
766      *      block_size must be >=1 and block_size * block_size must be a divisor
767      *      of the input depth.
768      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
769      *      Set to true to specify NCHW data layout for input0 and output0.
770      *      Available since NNAPI feature level 3.
771      *
772      * Outputs:
773      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
774      *      width*block_size, depth/(block_size*block_size)].
775      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
776      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
777      *      the scale and zeroPoint must be the same as input0.
778      *
779      * Available since NNAPI feature level 1.
780      */
781     ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
782 
783     /**
784      * Dequantizes the input tensor.
785      *
786      * The formula is:
787      *
788      *     output = (input - zeroPoint) * scale.
789      *
790      * Supported input tensor {@link OperandCode}:
791      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
792      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since NNAPI feature level 3)
793      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since NNAPI feature level 3)
794      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
795      *
796      * Supported output tensor {@link OperandCode}:
797      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
798      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
799      *
800      * Supported tensor rank: up to 4
801      *
802      * Inputs:
803      * * 0: A tensor.
804      *      Since NNAPI feature level 3, this tensor may be zero-sized.
805      *
806      * Outputs:
807      * * 0: A tensor with the same shape as input0.
808      *
809      * Available since NNAPI feature level 1.
810      */
811     ANEURALNETWORKS_DEQUANTIZE = 6,
812 
813     /**
814      * Looks up sub-tensors in the input tensor.
815      *
816      * This operator takes for input a tensor of values (Values) and
817      * a one-dimensional tensor of selection indices (Lookups).
818      * The output tensor is the concatenation of sub-tensors of Values as
819      * selected by Lookups.
820      *
821      * Think of Values as being sliced along its first dimension:
822      * The entries in Lookups select which slices are concatenated together
823      * to create the output tensor.
824      *
825      * For example, if Values has shape of [40, 200, 300] and
826      * Lookups has shape of [3], all three values found in Lookups are
827      * expected to be between 0 and 39. The resulting tensor must
828      * have shape of [3, 200, 300].
829      *
830      * If a value in Lookups is out of bounds, the operation must fail
831      * and an error must be reported.
832      *
833      * Supported value tensor {@link OperandCode}:
834      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 4)
835      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
836      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 3)
837      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
838      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
839      *
840      * Supported value tensor rank: from 2
841      *
842      * Inputs:
843      * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
844      *      The values are indices into the first dimension of Values.
845      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
846      *      extracted.
847      *
848      * Output:
849      * * 0: An n-D tensor with the same rank and shape as the Values
850      *      tensor, except for the first dimension which has the same size
851      *      as Lookups' only dimension.
852      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
853      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
854      *      the scale and zeroPoint must be the same as input1.
855      *
856      * Available since NNAPI feature level 1.
857      */
858     ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
859 
860     /**
861      * Computes element-wise floor() on the input tensor.
862      *
863      * Supported tensor {@link OperandCode}:
864      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
865      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
866      *
867      * Supported tensor rank: up to 4
868      *
869      * Inputs:
870      * * 0: A tensor.
871      *
872      * Outputs:
873      * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
874      *      the input tensor.
875      *
876      * Available since NNAPI feature level 1.
877      */
878     ANEURALNETWORKS_FLOOR = 8,
879 
880     /**
881      * Denotes a fully (densely) connected layer, which connects all elements
882      * in the input tensor with each element in the output tensor.
883      *
884      * This layer implements the operation:
885      *
886      *     outputs = activation(inputs * weights’ + bias)
887      *
888      * Supported tensor {@link OperandCode}:
889      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
890      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
891      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
892      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
893      *
894      * Supported tensor rank: up to 4.
895      *
896      * Inputs:
897      * * 0: A tensor of at least rank 2, specifying the input. If rank is
898      *      greater than 2, then it gets flattened to a 2-D Tensor. The
899      *      (flattened) 2-D Tensor is reshaped (if necessary) to
900      *      [batch_size, input_size], where "input_size" corresponds to the
901      *      number of inputs to the layer, matching the second dimension of
902      *      weights, and "batch_size" is calculated by dividing the number of
903      *      elements by "input_size".
904      *      Since NNAPI feature level 3, zero batch_size is supported for this tensor.
905      * * 1: A 2-D tensor, specifying the weights, of shape
906      *      [num_units, input_size], where "num_units" corresponds to the number
907      *      of output nodes.
908      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
909      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
910      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
911      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
912      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
913      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
914      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
915      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
916      *      {@link FuseCode} values. Specifies the activation to
917      *      invoke on the result.
918      *
919      * Outputs:
920      * * 0: The output tensor, of shape [batch_size, num_units]. Before NNAPI feature level 3, for
921      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
922      *      condition must be satisfied: output_scale > input_scale * filter_scale.
923      *
924      * Available since NNAPI feature level 1.
925      */
926     ANEURALNETWORKS_FULLY_CONNECTED = 9,
927 
928     /**
929      * Looks up sub-tensors in the input tensor using a key-value map.
930      *
931      * This operator takes for input a tensor of values (Values),
932      * a one-dimensional tensor of selection values (Lookups) and
933      * a one-dimensional tensor that maps these values to Values
934      * indexes. The output tensor is the concatenation of sub-tensors of
935      * Values as selected by Lookups via Keys.
936      *
937      * Think of Values as being sliced along its outer-most dimension.
938      * The output is a concatenation of selected slices, with one slice
939      * for each entry of Lookups. The slice selected is the one at the
940      * same index as the Maps entry that matches the value in Lookups.
941      *
942      * For a hit, the corresponding sub-tensor of Values is included
943      * in the Output tensor. For a miss, the corresponding sub-tensor in
944      * Output must have zero values.
945      *
946      * For example, if Values has shape of [40, 200, 300],
947      * Keys should have a shape of [40]. If Lookups tensor has shape
948      * of [3], three slices are being concatenated, so the resulting tensor
949      * must have the shape of [3, 200, 300]. If the first entry in Lookups
950      * has the value 123456, that value must be located in Keys tensor.
951      * If the sixth entry of Keys contains 123456, the sixth slice of Values
952      * must be selected. If no entry in Keys has 123456, a slice of zeroes
953      * must be concatenated.
954      *
955      * Supported value tensor {@link OperandCode}:
956      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
957      * * {@link ANEURALNETWORKS_TENSOR_INT32}
958      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
959      *
960      * Supported value tensor rank: from 2
961      *
962      * Inputs:
963      * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
964      *      shape [ k ].
965      * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
966      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
967      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
968      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
969      *      ascending order.
970      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
971      *      must be n.
972      *
973      * Outputs:
974      * * 0: Output. A tensor with shape [ k, … ].
975      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
976      *      the scale and zeroPoint must be the same as input2.
977      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
978      *      hits (True) or not (False).
979      *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
980      *      and scale 1.0f.
981      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
982      *
983      * Available since NNAPI feature level 1.
984      */
985     ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
986 
987     /**
988      * Applies L2 normalization along the axis dimension.
989      *
990      * The values in the output tensor are computed as:
991      *
992      *     output[batch, row, col, channel] =
993      *         input[batch, row, col, channel] /
994      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
995      *
996      * By default the axis dimension is the last dimension of the input tensor.
997      *
998      * Supported tensor {@link OperandCode}:
999      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1000      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1001      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1002      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1003      *
1004      * Supported tensor rank: up to 4
1005      * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1006      *
1007      * Inputs:
1008      * * 0: An n-D tensor, specifying the tensor to be normalized.
1009      * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1010      *      specifying the dimension normalization would be performed on.
1011      *      Negative index is used to specify axis from the end (e.g. -1 for
1012      *      the last axis). Must be in the range [-n, n).
1013      *      Available since NNAPI feature level 3.
1014      *
1015      * Outputs:
1016      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
1017      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1018      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1019      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1020      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1021      *
1022      *      NOTE: Before NNAPI feature level 4, if the elements along an axis are all zeros,
1023      *      the result is undefined. Since NNAPI feature level 4, if the elements along an axis
1024      *      are all zeros, the result is logical zero.
1025      *
1026      * Available since NNAPI feature level 1.
1027      */
1028     ANEURALNETWORKS_L2_NORMALIZATION = 11,
1029 
1030     /**
1031      * Performs a 2-D L2 pooling operation.
1032      *
1033      * The output dimensions are functions of the filter dimensions, stride, and
1034      * padding.
1035      *
1036      * The values in the output tensor are computed as:
1037      *
1038      *     output[b, i, j, c] =
1039      *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
1040      *              sum(1))
1041      *
1042      * Supported tensor {@link OperandCode}:
1043      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1044      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1045      *
1046      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1047      * With the default data layout NHWC, the data is stored in the order of:
1048      * [batch, height, width, channels]. Alternatively, the data layout could
1049      * be NCHW, the data storage order of: [batch, channels, height, width].
1050      * NCHW is supported since NNAPI feature level 3.
1051      *
1052      * Both explicit padding and implicit padding are supported.
1053      *
1054      * Inputs (explicit padding):
1055      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1056      *      the input.
1057      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1058      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1059      *      the left, in the ‘width’ dimension.
1060      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1061      *      the right, in the ‘width’ dimension.
1062      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1063      *      the top, in the ‘height’ dimension.
1064      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1065      *      the bottom, in the ‘height’ dimension.
1066      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1067      *      walking through input in the ‘width’ dimension.
1068      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1069      *      walking through input in the ‘height’ dimension.
1070      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1071      *      width.
1072      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1073      *      height.
1074      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1075      *      {@link FuseCode} values. Specifies the activation to
1076      *      invoke on the result.
1077      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1078      *       Set to true to specify NCHW data layout for input0 and output0.
1079      *       Available since NNAPI feature level 3.
1080      *
1081      * Inputs (implicit padding):
1082      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1083      *      the input.
1084      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1085      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1086      *      padding scheme, has to be one of the
1087      *      {@link PaddingCode} values.
1088      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1089      *      walking through input in the ‘width’ dimension.
1090      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1091      *      walking through input in the ‘height’ dimension.
1092      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1093      *      width.
1094      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1095      *      height.
1096      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1097      *      {@link FuseCode} values. Specifies the activation to
1098      *      invoke on the result.
1099      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1100      *      Set to true to specify NCHW data layout for input0 and output0.
1101      *      Available since NNAPI feature level 3.
1102      *
1103      * Outputs:
1104      * * 0: The output 4-D tensor, of shape
1105      *      [batches, out_height, out_width, depth].
1106      *
1107      * Available since NNAPI feature level 1.
1108      */
1109     ANEURALNETWORKS_L2_POOL_2D = 12,
1110 
1111     /**
1112      * Applies Local Response Normalization along the depth dimension.
1113      *
1114      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
1115      * last dimension), and each vector is normalized independently. Within a
1116      * given vector, each component is divided by the weighted, squared sum of
1117      * inputs within depth_radius.
1118      *
1119      * The output is calculated using this formula:
1120      *
1121      *     sqr_sum[a, b, c, d] = sum(
1122      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
1123      *     output = input / pow((bias + alpha * sqr_sum), beta)
1124      *
1125      * For input tensor with rank less than 4, independently normalizes each
1126      * 1-D slice along specified dimension.
1127      *
1128      * Supported tensor {@link OperandCode}:
1129      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1130      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1131      *
1132      * Supported tensor rank: up to 4
1133      * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1134      *
1135      * Inputs:
1136      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1137      *      the input.
1138      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
1139      *      the normalization window.
1140      * * 2: A scalar, specifying the bias, must not be zero.
1141      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias
1142      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1143      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
1144      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1145      * * 3: A scalar, specifying the scale factor, alpha.
1146      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
1147      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
1148      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
1149      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
1150      * * 4: A scalar, specifying the exponent, beta.
1151      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
1152      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1153      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
1154      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1155      * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1156      *      specifying the dimension normalization would be performed on.
1157      *      Negative index is used to specify axis from the end (e.g. -1 for
1158      *      the last axis). Must be in the range [-n, n).
1159      *      Available since NNAPI feature level 3.
1160      *
1161      * Outputs:
1162      * * 0: The output tensor of same shape as input0.
1163      *
1164      * Available since NNAPI feature level 1.
1165      */
1166     ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
1167 
1168     /**
1169      * Computes sigmoid activation on the input tensor element-wise.
1170      *
1171      * The output is calculated using this formula:
1172      *
1173      *     output = 1 / (1 + exp(-input))
1174      *
1175      * Supported tensor {@link OperandCode}:
1176      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1177      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1178      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1179      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1180      *
1181      * Supported tensor rank: up to 4.
1182      *
1183      * Inputs:
1184      * * 0: A tensor, specifying the input.
1185      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1186      *
1187      * Outputs:
1188      * * 0: The output tensor of same shape as input0.
1189      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1190      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1191      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1192      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1193      *
1194      * Available since NNAPI feature level 1.
1195      */
1196     ANEURALNETWORKS_LOGISTIC = 14,
1197 
1198     /**
1199      * Projects an input to a bit vector via locality sensitive hashing.
1200      *
1201      * Supported input tensor {@link OperandCode}:
1202      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1203      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1204      * * {@link ANEURALNETWORKS_TENSOR_INT32}
1205      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1206      *
1207      * Supported input tensor rank: from 1
1208      *
1209      * Inputs:
1210      * * 0: Hash functions. Dim.size == 2, DataType: Float.
1211      *      Tensor[0].Dim[0]: Number of hash functions.
1212      *      Tensor[0].Dim[1]: Number of projected output bits generated by each
1213      *      hash function.
1214      *      If the projection type is Sparse:
1215      *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
1216      *
1217      * * 1: Input. Dim.size >= 1, no restriction on DataType.
1218      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
1219      *      If not set, each input element is considered to have the same weight
1220      *      of 1.0.
1221      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
1222      * * 3: Type:
1223      *        Sparse:
1224      *          Value LSHProjectionType_SPARSE(=3) (since NNAPI feature level 3).
1225      *          Computed bit vector is considered to be sparse.
1226      *          Each output element is an int32 made up of multiple bits
1227      *          computed from hash functions.
1228      *
1229      *          NOTE: To avoid collisions across hash functions, an offset value
1230      *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
1231      *          where k is the index of the hash function.
1232      *
1233      *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1234      *          Legacy behavior that does not include the offset value.
1235      *
1236      *        Dense:
1237      *          Value LSHProjectionType_DENSE(=2).
1238      *          Computed bit vector is considered to be dense. Each output
1239      *          element represents a bit and can take the value of either
1240      *          0 or 1.
1241      *
1242      * Outputs:
1243      * * 0: If the projection type is Sparse:
1244      *      Output.Dim == { Tensor[0].Dim[0] }
1245      *      A tensor of int32 that represents hash signatures.
1246      *
1247      *      If the projection type is Dense:
1248      *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1249      *      A flattened tensor that represents projected bit vectors.
1250      *
1251      * Available since NNAPI feature level 1.
1252      * The offset value for sparse projections was added in NNAPI feature level 3.
1253      */
1254     ANEURALNETWORKS_LSH_PROJECTION = 15,
1255 
1256     /**
1257      * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1258      *
1259      * The LSTM operation is described by the following equations.
1260      *
1261      * \f{eqnarray*}{
1262      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1263      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1264      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1265      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1266      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1267      *      & & \\
1268      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1269      *      & if\ there\ is\ a\ projection; \\
1270      * h_t =& & \\
1271      *      & o_t \odot g(C_t) & otherwise. \\
1272      * \f}
1273      * Where:
1274      * * \f$x_t\f$ is the input,
1275      * * \f$i_t\f$ is the input gate,
1276      * * \f$f_t\f$ is the forget gate,
1277      * * \f$C_t\f$ is the cell state,
1278      * * \f$o_t\f$ is the output,
1279      * * \f$h_t\f$ is the output state,
1280      * * \f$\sigma\f$ is the logistic sigmoid function,
1281      * * \f$g\f$ is the cell input and cell output activation function, usually
1282      *   \f$tanh\f$,
1283      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1284      * * \f$W_{hi}\f$ is the recurrent to input weight matrix,
1285      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1286      * * \f$b_i\f$ is the input gate bias,
1287      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1288      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1289      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1290      * * \f$b_f\f$ is the forget gate bias,
1291      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1292      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1293      * * \f$b_c\f$ is the cell bias,
1294      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1295      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1296      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1297      * * \f$b_o\f$ is the output gate bias,
1298      * * \f$W_{proj}\f$ is the projection weight matrix,
1299      * * \f$b_{proj}\f$ is the projection bias,
1300      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1301      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1302      * * \f$\odot\f$ is the
1303      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1304      *   Hadamard product</a> that takes two matrices and produces another
1305      *   matrix, each element of which is the product of the corresponding
1306      *   elements of the input matrices.
1307      *
1308      * Since NNAPI feature level 3 LSTM supports layer normalization.
1309      * In case layer normalization is used, the inputs to internal activation
1310      * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1311      * following an approach from section 3.1 from
1312      * https://arxiv.org/pdf/1607.06450.pdf
1313      *
1314      * The operation has the following independently optional inputs:
1315      * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1316      *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1317      *   have values or neither of them have values (i.e., all set to null). If
1318      *   they have values, the peephole optimization is used.
1319      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1320      *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1321      *   or none of them have values. If they have no values, coupling of input
1322      *   and forget gates (CIFG) is used, in which case the input gate
1323      *   (\f$i_t\f$) is calculated using the following equation instead.
1324      *   \f{eqnarray*}{
1325      *   i_t = 1 - f_t
1326      *   \f}
1327      *   In case peephole optimization is used and CIFG is not used
1328      *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1329      *   cell-to-input weights must have no value.
1330      * * The projection weights (\f$W_{proj}\f$) is required only for the
1331      *   recurrent projection layer, and should otherwise have no value.
1332      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1333      *   value if the recurrent projection layer exists, and should otherwise
1334      *   have no value.
1335      * * (NNAPI feature level 3 or later) The four layer normalization weights either all have
1336      *   values or none of them have values. Additionally, if CIFG is used,
1337      *   input layer normalization weights tensor is omitted and the other layer
1338      *   normalization weights either all have values or none of them have
1339      *   values. Layer normalization is used when the values of all the layer
1340      *   normalization weights are present.
1341      *
1342      * References:
1343      *
1344      * The default non-peephole non-CIFG implementation is based on:
1345      * http://www.bioinf.jku.at/publications/older/2604.pdf
1346      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1347      * Computation, 9(8):1735-1780, 1997.
1348      *
1349      * The peephole implementation and projection layer is based on:
1350      * https://research.google.com/pubs/archive/43905.pdf
1351      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1352      * recurrent neural network architectures for large scale acoustic
1353      * modeling." INTERSPEECH, 2014.
1354      * (However, the concept of peephole optimization was introduced in work
1355      * prior to this paper.)
1356      *
1357      * The coupling of input and forget gate (CIFG) is based on:
1358      * http://arxiv.org/pdf/1503.04069.pdf
1359      * Greff et al. "LSTM: A Search Space Odyssey"
1360      *
1361      * The layer normalization is based on:
1362      * https://arxiv.org/pdf/1607.06450.pdf
1363      * Jimmy Ba et al. "Layer Normalization"
1364      *
1365      * Supported tensor {@link OperandCode}:
1366      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1367      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1368      *
1369      * All input and output tensors must be of the same type.
1370      *
1371      * Inputs:
1372      * * 0: The input (\f$x_t\f$).
1373      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1374      *      corresponds to the batching dimension, and “input_size” is the size
1375      *      of the input.
1376      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1377      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1378      *      corresponds to the number of cell units.
1379      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1380      *      A 2-D tensor of shape [num_units, input_size].
1381      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1382      *      A 2-D tensor of shape [num_units, input_size].
1383      * * 4: The input-to-output weights (\f$W_{xo}\f$).
1384      *      A 2-D tensor of shape [num_units, input_size].
1385      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1386      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1387      *      corresponds to either the number of cell units (i.e., “num_units”),
1388      *      or the second dimension of the “projection_weights”, if defined.
1389      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1390      *      A 2-D tensor of shape [num_units, output_size].
1391      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1392      *      A 2-D tensor of shape [num_units, output_size].
1393      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1394      *      A 2-D tensor of shape [num_units, output_size].
1395      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1396      *      A 1-D tensor of shape [num_units].
1397      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1398      *      A 1-D tensor of shape [num_units].
1399      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1400      *      A 1-D tensor of shape [num_units].
1401      * * 12:The input gate bias (\f$b_i\f$). Optional.
1402      *      A 1-D tensor of shape [num_units].
1403      * * 13:The forget gate bias (\f$b_f\f$).
1404      *      A 1-D tensor of shape [num_units].
1405      * * 14:The cell bias (\f$b_c\f$).
1406      *      A 1-D tensor of shape [num_units].
1407      * * 15:The output gate bias (\f$b_o\f$).
1408      *      A 1-D tensor of shape [num_units].
1409      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1410      *      A 2-D tensor of shape [output_size, num_units].
1411      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1412      *      A 1-D tensor of shape [output_size].
1413      * * 18:The output state (in) (\f$h_{t-1}\f$).
1414      *      A 2-D tensor of shape [batch_size, output_size].
1415      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1416      *      A 2-D tensor of shape [batch_size, num_units].
1417      * * 20:The activation function (\f$g\f$).
1418      *      A value indicating the activation function:
1419      *      <ul>
1420      *      <li>0: None;
1421      *      <li>1: Relu;
1422      *      <li>3: Relu6;
1423      *      <li>4: Tanh;
1424      *      <li>6: Sigmoid.
1425      *      </ul>
1426      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1427      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1428      *      then clipping is disabled.
1429      *      Until NNAPI feature level 3 this scalar must be of type {@link
1430      *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1431      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1432      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1433      *      otherwise if all the input tensors have the type {@link
1434      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1435      *      ANEURALNETWORKS_FLOAT16}.
1436      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1437      *      projection layer, such that values are bound within
1438      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1439      *      Until NNAPI feature level 3 this scalar must be of type {@link
1440      *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1441      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1442      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1443      *      otherwise if all the input tensors have the type {@link
1444      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1445      *      ANEURALNETWORKS_FLOAT16}.
1446      * Since NNAPI feature level 3 there are additional inputs to this op:
1447      * * 23:The input layer normalization weights.
1448      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1449      *      to activation at input gate.
1450      * * 24:The forget layer normalization weights.
1451      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1452      *      to activation at forget gate.
1453      * * 25:The cell layer normalization weights.
1454      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1455      *      to activation at cell gate.
1456      * * 26:The output layer normalization weights.
1457      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1458      *      to activation at output gate.
1459      *
1460      * Outputs:
1461      * * 0: The scratch buffer.
1462      *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1463      *      [batch_size, num_units * 4] without CIFG.
1464      * * 1: The output state (out) (\f$h_t\f$).
1465      *      A 2-D tensor of shape [batch_size, output_size].
1466      * * 2: The cell state (out) (\f$C_t\f$).
1467      *      A 2-D tensor of shape [batch_size, num_units].
1468      * * 3: The output (\f$o_t\f$).
1469      *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1470      *      the same as the current “output state (out)” value.
1471      *
1472      * Available since NNAPI feature level 1.
1473      */
1474     ANEURALNETWORKS_LSTM = 16,
1475 
1476     /**
1477      * Performs a 2-D max pooling operation.
1478      *
1479      * The output dimensions are functions of the filter dimensions, stride, and
1480      * padding.
1481      *
1482      * The values in the output tensor are computed as:
1483      *
1484      *     output[b, i, j, channel] =
1485      *         max_{di, dj} (
1486      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1487      *         )
1488      *
1489      * Supported tensor {@link OperandCode}:
1490      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1491      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1492      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1493      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1494      *
1495      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1496      * With the default data layout NHWC, the data is stored in the order of:
1497      * [batch, height, width, channels]. Alternatively, the data layout could
1498      * be NCHW, the data storage order of: [batch, channels, height, width].
1499      * NCHW is supported since NNAPI feature level 3.
1500      *
1501      * Both explicit padding and implicit padding are supported.
1502      *
1503      * Inputs (explicit padding):
1504      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1505      *      the input.
1506      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1507      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1508      *      the left, in the ‘width’ dimension.
1509      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1510      *      the right, in the ‘width’ dimension.
1511      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1512      *      the top, in the ‘height’ dimension.
1513      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1514      *      the bottom, in the ‘height’ dimension.
1515      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1516      *      walking through input in the ‘width’ dimension.
1517      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1518      *      walking through input in the ‘height’ dimension.
1519      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1520      *      width.
1521      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1522      *      height.
1523      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1524      *      {@link FuseCode} values. Specifies the activation to
1525      *      invoke on the result.
1526      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1527      *       Set to true to specify NCHW data layout for input0 and output0.
1528      *       Available since NNAPI feature level 3.
1529      *
1530      * Inputs (implicit padding):
1531      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1532      *      the input.
1533      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1534      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1535      *      padding scheme, has to be one of the
1536      *      {@link PaddingCode} values.
1537      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1538      *      walking through input in the ‘width’ dimension.
1539      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1540      *      walking through input in the ‘height’ dimension.
1541      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1542      *      width.
1543      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1544      *      height.
1545      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1546      *      {@link FuseCode} values. Specifies the activation to
1547      *      invoke on the result.
1548      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1549      *      Set to true to specify NCHW data layout for input0 and output0.
1550      *      Available since NNAPI feature level 3.
1551      *
1552      * Outputs:
1553      * * 0: The output 4-D tensor, of shape
1554      *      [batches, out_height, out_width, depth].
1555      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1556      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1557      *      the scale and zeroPoint must be the same as input0.
1558      *
1559      * Available since NNAPI feature level 1.
1560      */
1561     ANEURALNETWORKS_MAX_POOL_2D = 17,
1562 
1563     /**
1564      * Multiplies two tensors, element-wise.
1565      *
1566      * Takes two input tensors of identical {@link OperandCode} and compatible
1567      * dimensions. The output is the product of both input tensors, optionally
1568      * modified by an activation function.
1569      *
1570      * Two dimensions are compatible when:
1571      *     1. they are equal, or
1572      *     2. one of them is 1
1573      *
1574      * The size of the resulting output is the maximum size along each dimension
1575      * of the input operands. It starts with the trailing dimensions, and works
1576      * its way forward.
1577      *
1578      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
1579      * dimension is only compatible with 0 or 1. The size of the output
1580      * dimension is zero if either of corresponding input dimension is zero.
1581      *
1582      * Supported tensor {@link OperandCode}:
1583      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1584      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1585      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1586      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1587      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
1588      *
1589      * Supported tensor rank: up to 4
1590      *
1591      * Inputs:
1592      * * 0: A tensor.
1593      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1594      *      as input0.
1595      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1596      *      {@link FuseCode} values. Specifies the activation to
1597      *      invoke on the result.
1598      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
1599      *      the {@link FuseCode} must be "NONE".
1600      *
1601      * Outputs:
1602      * * 0: The product, a tensor of the same {@link OperandCode} as input0.
1603      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1604      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1605      *      the following condition must be satisfied:
1606      *      output_scale > input1_scale * input2_scale.
1607      *
1608      * Available since NNAPI feature level 1.
1609      */
1610     ANEURALNETWORKS_MUL = 18,
1611 
1612     /**
1613      * Computes rectified linear activation on the input tensor element-wise.
1614      *
1615      * The output is calculated using this formula:
1616      *
1617      *     output = max(0, input)
1618      *
1619      * Supported tensor {@link OperandCode}:
1620      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1621      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1622      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1623      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1624      *
1625      * Supported tensor rank: up to 4.
1626      *
1627      * Inputs:
1628      * * 0: A tensor, specifying the input.
1629      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1630      *
1631      * Outputs:
1632      * * 0: The output tensor of same shape as input0.
1633      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1634      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1635      *      the scale and zeroPoint must be the same as input0.
1636      *
1637      * Available since NNAPI feature level 1.
1638      */
1639     ANEURALNETWORKS_RELU = 19,
1640 
1641     /**
1642      * Computes rectified linear 1 activation on the input tensor element-wise.
1643      *
1644      * The output is calculated using this formula:
1645      *
1646      *     output = min(1.f, max(-1.f, input))
1647      *
1648      * Supported tensor {@link OperandCode}:
1649      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1650      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1651      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1652      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1653      *
1654      * Supported tensor rank: up to 4.
1655      *
1656      * Inputs:
1657      * * 0: A tensor, specifying the input.
1658      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1659      *
1660      * Outputs:
1661      * * 0: The output tensor of the same shape as input0.
1662      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1663      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1664      *      the scale and zeroPoint must be the same as input0.
1665      *
1666      * Available since NNAPI feature level 1.
1667      */
1668     ANEURALNETWORKS_RELU1 = 20,
1669 
1670     /**
1671      * Computes rectified linear 6 activation on the input tensor element-wise.
1672      *
1673      * The output is calculated using this formula:
1674      *
1675      *     output = min(6, max(0, input))
1676      *
1677      * Supported tensor {@link OperandCode}:
1678      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1679      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1680      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1681      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1682      *
1683      * Supported tensor rank: up to 4.
1684      *
1685      * Inputs:
1686      * * 0: A tensor, specifying the input.
1687      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1688      *
1689      * Outputs:
1690      * * 0: The output tensor of same shape as input0.
1691      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1692      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1693      *      the scale and zeroPoint must be the same as input0.
1694      *
1695      * Available since NNAPI feature level 1.
1696      */
1697     ANEURALNETWORKS_RELU6 = 21,
1698 
1699     /**
1700      * Reshapes a tensor.
1701      *
1702      * Given tensor, this operation returns a tensor that has the same values as
1703      * tensor, but with a newly specified shape.
1704      *
1705      * Supported tensor {@link OperandCode}:
1706      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1707      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1708      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1709      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1710      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 6)
1711      *
1712      * Supported tensor rank: up to 4.
1713      *
1714      * Inputs:
1715      * * 0: A tensor, specifying the tensor to be reshaped.
1716      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1717      *      shape of the output tensor. The number of elements implied by shape
1718      *      must be the same as the number of elements in the input tensor.
1719      *
1720      *      If one component of shape is the special value -1, the size of that
1721      *      dimension is computed so that the total size remains constant. In
1722      *      particular, a shape of [-1] flattens into 1-D. At most one component
1723      *      of shape can be -1.
1724      *
1725      * Outputs:
1726      * * 0: The output tensor, of shape specified by the input shape.
1727      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1728      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1729      *      the scale and zeroPoint must be the same as input0.
1730      *
1731      * Available since NNAPI feature level 1.
1732      */
1733     ANEURALNETWORKS_RESHAPE = 22,
1734 
1735     /**
1736      * Resizes images to given size using the bilinear interpolation.
1737      *
1738      * Resized images must be distorted if their output aspect ratio is not the
1739      * same as input aspect ratio. The corner pixels of output may not be the
1740      * same as corner pixels of input.
1741      *
1742      * Supported tensor {@link OperandCode}:
1743      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1744      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1745      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1746      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1747      *
1748      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1749      * With the default data layout NHWC, the data is stored in the order of:
1750      * [batch, height, width, channels]. Alternatively, the data layout could
1751      * be NCHW, the data storage order of: [batch, channels, height, width].
1752      * NCHW is supported since NNAPI feature level 3.
1753      *
1754      * Both resizing by shape and resizing by scale are supported.
1755      *
1756      * Inputs (resizing by shape):
1757      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1758      *      the input.
1759      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1760      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1761      *      width of the output tensor.
1762      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1763      *      height of the output tensor.
1764      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1765      *      Set to true to specify NCHW data layout for input0 and output0.
1766      *      Available since NNAPI feature level 3.
1767      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1768      *      scalar, default to false.  If True, the centers of the 4 corner
1769      *      pixels of the input and output tensors are aligned, preserving the
1770      *      values at the corner pixels.
1771      *      Available since NNAPI feature level 4.
1772      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1773      *      scalar, default to false. If True, the pixel centers are assumed to
1774      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1775      *      TF 2.0. If this parameter is True, then align_corners parameter
1776      *      must be False.
1777      *      Available since NNAPI feature level 4.
1778      *
1779      * Inputs (resizing by scale, since NNAPI feature level 3):
1780      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1781      *      the input. Zero batches is supported for this tensor.
1782      * * 1: A scalar, specifying width_scale, the scaling factor of the width
1783      *      dimension from the input tensor to the output tensor. The output
1784      *      width is calculated as new_width = floor(width * width_scale).
1785      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1786      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1787      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1788      * * 2: A scalar, specifying height_scale, the scaling factor of the height
1789      *      dimension from the input tensor to the output tensor. The output
1790      *      height is calculated as new_height = floor(height * height_scale).
1791      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1792      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1793      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1794      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1795      *      Set to true to specify NCHW data layout for input0 and output0.
1796      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1797      *      scalar, default to false.  If True, the centers of the 4 corner
1798      *      pixels of the input and output tensors are aligned, preserving the
1799      *      values at the corner pixels.
1800      *      Available since NNAPI feature level 4.
1801      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1802      *      scalar, default to false. If True, the pixel centers are assumed to
1803      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1804      *      TF 2.0. If this parameter is True, then align_corners parameter
1805      *      must be False.
1806      *      Available since NNAPI feature level 4.
1807      *
1808      * Outputs:
1809      * * 0: The output 4-D tensor, of shape
1810      *      [batches, new_height, new_width, depth].
1811      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1812      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1813      *      the scale and zeroPoint must be the same as input0.
1816      *
1817      * Available since NNAPI feature level 1.
1818      */
1819     ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1820 
1821     /**
1822      * A basic recurrent neural network layer.
1823      *
1824      * This layer implements the operation:
1825      * outputs = state = activation(inputs * input_weights +
1826      *                              state * recurrent_weights + bias)
1827      *
1828      * Where:
1829      * * “input_weights” is a weight matrix that multiplies the inputs;
1830      * * “recurrent_weights” is a weight matrix that multiplies the current
1831      *    “state” which itself is the output from the previous time step
1832      *    computation;
1833      * * “bias” is a bias vector (added to each output vector in the batch);
1834      * * “activation” is the function passed as the “fused_activation_function”
1835      *   argument (if not “NONE”).
1836      *
1837      * Supported tensor {@link OperandCode}:
1838      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1839      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1840      *
1841      * The input tensors must all be the same type.
1842      *
1843      * Inputs:
1844      * * 0: input.
1845      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1846      *      corresponds to the batching dimension, and “input_size” is the size
1847      *      of the input.
1848      * * 1: weights.
1849      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1850      *      corresponds to the number of units.
1851      * * 2: recurrent_weights.
1852      *      A 2-D tensor of shape [num_units, num_units], with columns
1853      *      corresponding to the weights from each unit.
1854      * * 3: bias.
1855      *      A 1-D tensor of shape [num_units].
1856      * * 4: hidden state (in).
1857      *      A 2-D tensor of shape [batch_size, num_units].
1858      * * 5: fused_activation_function.
1859      *      An optional {@link FuseCode} value indicating the
1860      *      activation function. If “NONE” is specified then it results in a
1861      *      linear activation.
1862      *
1863      * Outputs:
1864      * * 0: hidden state (out).
1865      *      A 2-D tensor of shape [batch_size, num_units].
1866      *
1867      * * 1: output.
1868      *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1869      *      the same as the current state value.
1870      *
1871      * Available since NNAPI feature level 1.
1872      */
1873     ANEURALNETWORKS_RNN = 24,
1874 
1875     /**
1876      * Computes the softmax activation on the input tensor element-wise, per
1877      * batch, by normalizing the input vector so the maximum coefficient is
1878      * zero.
1879      *
1880      * The output is calculated using this formula:
1881      *
1882      *     output[batch, i] =
1883      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1884      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1885      *
1886      * For input tensor with rank other than 2, the activation will be applied
1887      * independently on each 1-D slice along specified dimension.
1888      *
1889      * Supported tensor {@link OperandCode}:
1890      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1891      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1892      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1893      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1894      *
1895      * Supported tensor rank: up to 4.
1896      * Tensors with rank other than 2 or 4 are only supported since NNAPI feature level 3.
1897      *
1898      * Inputs:
1899      * * 0: A 2-D or 4-D tensor, specifying the input.
1900      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1901      * * 1: A scalar, specifying the positive scaling factor for the exponent,
1902      *      beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1903      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
1904      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1905      *      must be of {@link ANEURALNETWORKS_FLOAT32}.
1906      *      If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the
1907      *      scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
1908      * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1909      *      specifying the dimension the activation would be performed on.
1910      *      Negative index is used to specify axis from the end (e.g. -1 for
1911      *      the last axis). Must be in the range [-n, n).
1912      *      Available since NNAPI feature level 3.
1913      *
1914      * Outputs:
1915      * * 0: The output tensor of same shape as input0.
1916      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1917      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1918      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1919      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1920      *
1921      * Available since NNAPI feature level 1.
1922      */
1923     ANEURALNETWORKS_SOFTMAX = 25,
1924 
1925     /**
1926      * Rearranges blocks of spatial data, into depth.
1927      *
1928      * More specifically, this op outputs a copy of the input tensor where
1929      * values from the height and width dimensions are moved to the depth
1930      * dimension. The value block_size indicates the input block size and how
1931      * the data is moved.
1932      *
1933      * Non-overlapping blocks of size block_size x block_size from the height
1934      * and width dimensions are rearranged into depth at each location.
1935      *
1936      * The depth of the output tensor is input_depth * block_size * block_size.
1937      * The input tensor's height and width must be divisible by block_size.
1938      *
1939      * Supported tensor {@link OperandCode}:
1940      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1941      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1942      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1943      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1944      *
1945      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1946      * With the default data layout NHWC, the data is stored in the order of:
1947      * [batch, height, width, channels]. Alternatively, the data layout could
1948      * be NCHW, the data storage order of: [batch, channels, height, width].
1949      * NCHW is supported since NNAPI feature level 3.
1950      *
1951      * Inputs:
1952      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1953      *      specifying the input.
1954      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1955      *      block_size must be >=1 and block_size must be a divisor of both the
1956      *      input height and width.
1957      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1958      *      Set to true to specify NCHW data layout for input0 and output0.
1959      *      Available since NNAPI feature level 3.
1960      *
1961      * Outputs:
1962      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1963      *      width/block_size, depth_in*block_size*block_size].
1964      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1965      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1966      *      the scale and zeroPoint must be the same as input0.
1967      *
1968      * Available since NNAPI feature level 1.
1969      */
1970     ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
1971 
1972     /**
1973      * SVDF op is a kind of stateful layer derived from the notion that a
1974      * densely connected layer that's processing a sequence of input frames can
1975      * be approximated by using a singular value decomposition of each of its
1976      * nodes. The implementation is based on:
1977      *
1978      * https://research.google.com/pubs/archive/43813.pdf
1979      *
1980      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1981      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1982      * INTERSPEECH, 2015.
1983      *
1984      * It processes the incoming input using a 2-stage filtering mechanism:
1985      * * stage 1 performs filtering on the "features" dimension, whose outputs
1986      *   get pushed into a memory of fixed-size memory_size.
1987      * * stage 2 performs filtering on the "time" dimension of the memory_size
1988      *   memoized outputs of stage 1.
1989      *
1990      * Specifically, for rank 1, this layer implements the operation:
1991      *
1992      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1993      *                          "ANEURALNETWORKS_PADDING_VALID"));
1994      *     outputs = activation(memory * weights_time + bias);
1995      *
1996      * Where:
1997      * * “weights_feature” is a weights matrix that processes the inputs (by
1998      *   convolving the input with every “feature filter”), and whose outputs
1999      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
2000      *   entry gets dropped);
2001      * * “weights_time” is a weights matrix that processes the “memory” (by a
2002      *   batched matrix multiplication on the num_units);
2003      * * “bias” is an optional bias vector (added to each output vector in the
2004      *   batch); and
2005      * * “activation” is the function passed as the “fused_activation_function”
2006      *   argument (if not “NONE”).
2007      *
2008      * Each rank adds a dimension to the weights matrices by means of stacking
2009      * the filters.
2010      *
2011      * Supported tensor {@link OperandCode}:
2012      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2013      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2014      *
2015      * All input tensors must be the same type.
2016      *
2017      * Inputs:
2018      * * 0: input.
2019      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2020      *      corresponds to the batching dimension, and “input_size” is the size
2021      *      of the input.
2022      * * 1: weights_feature.
2023      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2024      *      corresponds to the number of units.
2025      * * 2: weights_time.
2026      *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
2027      *      corresponds to the fixed-size of the memory.
2028      * * 3: bias.
2029      *      An optional 1-D tensor of shape [num_units].
2030      * * 4: state (in).
2031      *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
2032      * * 5: rank.
2033      *      The rank of the SVD approximation.
2034      * * 6: fused_activation_function.
2035      *      An optional {@link FuseCode} value indicating the
2036      *      activation function. If “NONE” is specified then it results in a
2037      *      linear activation.
2038      *
2039      * Outputs:
2040      * * 0: state (out).
2041      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2042      *      [batch_size, (memory_size - 1) * num_units * rank].
2043      * * 1: output.
2044      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2045      *      [batch_size, num_units].
2046      *
2047      * Available since NNAPI feature level 1.
2048      */
2049     ANEURALNETWORKS_SVDF = 27,
2050 
2051     /**
2052      * Computes hyperbolic tangent of input tensor element-wise.
2053      *
2054      * The output is calculated using this formula:
2055      *
2056      *     output = tanh(input)
2057      *
2058      * Supported tensor {@link OperandCode}:
2059      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2060      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2061      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2062      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2063      *
2064      * Supported tensor rank: up to 4.
2065      *
2066      * Inputs:
2067      * * 0: A tensor, specifying the input.
2068      *      Since NNAPI feature level 3, this tensor may be zero-sized.
2069      *
2070      * Outputs:
2071      * * 0: The output tensor of same shape as input0.
2072      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
2073      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
2074      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
2075      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
2076      *
2077      * Available since NNAPI feature level 1.
2078      */
2079     ANEURALNETWORKS_TANH = 28,
2080 
2081     // Operations below are available since NNAPI feature level 2.
2082 
2083     /**
2084      * BatchToSpace for N-dimensional tensors.
2085      *
2086      * This operation reshapes the batch dimension (dimension 0) into M + 1
2087      * dimensions of shape block_shape + [batch], interleaves these blocks back
2088      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
2089      * result with the same rank as the input.
2090      *
2091      * This is the reverse of SpaceToBatch.
2092      *
2093      * Supported tensor {@link OperandCode}:
2094      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2095      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2096      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2097      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2098      *
2099      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2100      * With the default data layout NHWC, the data is stored in the order of:
2101      * [batch, height, width, channels]. Alternatively, the data layout could
2102      * be NCHW, the data storage order of: [batch, channels, height, width].
2103      * NCHW is supported since NNAPI feature level 3.
2104      *
2105      * Inputs:
2106      * * 0: An n-D tensor, specifying the tensor to be reshaped
2107      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2108      *      sizes for each spatial dimension of the input tensor. All values
2109      *      must be >= 1.
2110      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2111      *      Set to true to specify NCHW data layout for input0 and output0.
2112      *      Available since NNAPI feature level 3.
2113      *
2114      * Outputs:
2115      * * 0: A tensor of the same {@link OperandCode} as input0.
2116      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2117      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2118      *      the scale and zeroPoint must be the same as input0.
2119      *
2120      * Available since NNAPI feature level 2.
2121      */
2122     ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
2123 
2124     /**
2125      * Element-wise division of two tensors.
2126      *
2127      * Takes two input tensors of identical {@link OperandCode} and compatible
2128      * dimensions. The output is the result of dividing the first input tensor
2129      * by the second, optionally modified by an activation function.
2130      *
2131      * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs
2132      * "floor division" ("//" in Python). For example,
2133      *     5 // 2 = 2
2134      *    -5 // 2 = -3
2135      *
2136      * Two dimensions are compatible when:
2137      *     1. they are equal, or
2138      *     2. one of them is 1
2139      *
2140      * The size of the output is the maximum size along each dimension of the
2141      * input operands. It starts with the trailing dimensions, and works its way
2142      * forward.
2143      *
2144      * Example:
2145      *     input1.dimension =    {4, 1, 2}
2146      *     input2.dimension = {5, 4, 3, 1}
2147      *     output.dimension = {5, 4, 3, 2}
2148      *
2149      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2150      * dimension is only compatible with 0 or 1. The size of the output
2151      * dimension is zero if either of corresponding input dimension is zero.
2152      *
2153      * Supported tensor {@link OperandCode}:
2154      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2155      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2156      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2157      *
2158      * Supported tensor rank: up to 4
2159      *
2160      * Inputs:
2161      * * 0: An n-D tensor, specifying the first input.
2162      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2163      *      as input0.
2164      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2165      *      {@link FuseCode} values. Specifies the activation to
2166      *      invoke on the result.
2167      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2168      *      the {@link FuseCode} must be "NONE".
2169      *
2170      * Outputs:
2171      * * 0: A tensor of the same {@link OperandCode} as input0.
2172      *
2173      * Available since NNAPI feature level 2.
2174      */
2175     ANEURALNETWORKS_DIV = 30,
2176 
2177     /**
2178      * Computes the mean of elements across dimensions of a tensor.
2179      *
2180      * Reduces the input tensor along the given dimensions to reduce. Unless
2181      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
2182      * in axis. If keep_dims is true, the reduced dimensions are retained with
2183      * length 1.
2184      *
2185      * Supported tensor {@link OperandCode}:
2186      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2187      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2188      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2189      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2190      *
2191      * Supported tensor rank: up to 4
2192      *
2193      * Inputs:
2194      * * 0: A tensor, specifying the input.
2195      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
2196      *      to reduce. Must be in the range
2197      *      [-rank(input_tensor), rank(input_tensor)).
2198      *
2199      *      NOTE: When the operation was introduced, the documentation
2200      *      incorrectly stated that if dimensions were empty, the operation
2201      *      would reduce across all dimensions. This behavior was never
2202      *      implemented.
2203      *
2204      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
2205      *      retains reduced dimensions with length 1.
2206      *
2207      * Outputs:
2208      * * 0: A tensor of the same {@link OperandCode} as input0.
2209      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2210      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2211      *      the scale and zeroPoint must be the same as input0.
2212      *      If all dimensions are reduced and keep_dims is false, the output
2213      *      shape is [1].
2214      *
2215      * Available since NNAPI feature level 2.
2216      */
2217     ANEURALNETWORKS_MEAN = 31,
2218 
2219     /**
2220      * Pads a tensor.
2221      *
2222      * This operation pads a tensor according to the specified paddings.
2223      *
2224      * Supported tensor {@link OperandCode}:
2225      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2226      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2227      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2228      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2229      *   (full support since NNAPI feature level 3, see the output section)
2230      *
2231      * Supported tensor rank: up to 4
2232      *
2233      * Inputs:
2234      * * 0: An n-D tensor, specifying the tensor to be padded.
2235      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2236      *      for each spatial dimension of the input tensor. The shape of the
2237      *      tensor must be {rank(input0), 2}.
2238      *      padding[i, 0] specifies the number of elements to be padded in the
2239      *      front of dimension i.
2240      *      padding[i, 1] specifies the number of elements to be padded after the
2241      *      end of dimension i.
2242      *
2243      * Outputs:
2244      * * 0: A tensor of the same {@link OperandCode} as input0. The
2245      *      output tensor has the same rank as input0, and each
2246      *      dimension of the output tensor has the same size as the
2247      *      corresponding dimension of the input tensor plus the size
2248      *      of the padding:
2249      *          output0.dimension[i] =
2250      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
2251      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2252      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2253      *      the scale and zeroPoint must be the same as input0.
2254      *
2255      *      NOTE: Before NNAPI feature level 3, the pad value for
2256      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2257      *      Since NNAPI feature level 3, the pad value is always the logical zero.
2258      *
2259      * Available since NNAPI feature level 2.
2260      */
2261     ANEURALNETWORKS_PAD = 32,
2262 
2263     /**
2264      * SpaceToBatch for N-Dimensional tensors.
2265      *
2266      * This operation divides "spatial" dimensions [1, ..., M] of the input into
2267      * a grid of blocks of shape block_shape, and interleaves these blocks with
2268      * the "batch" dimension (0) such that in the output, the spatial dimensions
2269      * [1, ..., M] correspond to the position within the grid, and the batch
2270      * dimension combines both the position within a spatial block and the
2271      * original batch position. Prior to division into blocks, the spatial
2272      * dimensions of the input are optionally zero padded according to paddings.
2273      *
2274      * Supported tensor {@link OperandCode}:
2275      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2276      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2277      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2278      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2279      *   (full support since NNAPI feature level 3, see the output section)
2280      *
2281      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2282      * With the default data layout NHWC, the data is stored in the order of:
2283      * [batch, height, width, channels]. Alternatively, the data layout could
2284      * be NCHW, the data storage order of: [batch, channels, height, width].
2285      * NCHW is supported since NNAPI feature level 3.
2286      *
2287      * Inputs:
2288      * * 0: An n-D tensor, specifying the input.
2289      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2290      *      sizes for each spatial dimension of the input tensor. All values
2291      *      must be >= 1.
2292      * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2293      *      for each spatial dimension of the input tensor. All values must be
2294      *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2295      *      of spatial dimensions.
2296      *      padding[i, 0] specifies the number of elements to be padded in the
2297      *      front of dimension i.
2298      *      padding[i, 1] specifies the number of elements to be padded after the
2299      *      end of dimension i.
2300      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2301      *      Set to true to specify NCHW data layout for input0 and output0.
2302      *      Available since NNAPI feature level 3.
2303      *
2304      * Outputs:
2305      * * 0: A tensor of the same {@link OperandCode} as input0.
2306      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2307      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2308      *      the scale and zeroPoint must be the same as input0.
2309      *
2310      *      NOTE: Before NNAPI feature level 3, the pad value for
2311      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2312      *      Since NNAPI feature level 3, the pad value is always the logical zero.
2313      *
2314      * Available since NNAPI feature level 2.
2315      */
2316     ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
2317 
2318     /**
2319      * Removes dimensions of size 1 from the shape of a tensor.
2320      *
2321      * Given a tensor input, this operation returns a tensor of the same
2322      * {@link OperandCode} with all dimensions of size 1 removed. If you don't
2323      * want to remove all size 1 dimensions, you can remove specific size 1
2324      * dimensions by specifying the axes (input1).
2325      *
2326      * Supported tensor {@link OperandCode}:
2327      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2328      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2329      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2330      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2331      *
2332      * Supported tensor rank: up to 4
2333      *
2334      * Inputs:
2335      * * 0: An n-D tensor, the tensor to be squeezed.
2336      * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2337      *      dimensions to squeeze. If specified only squeezes the dimensions
2338      *      listed. Otherwise, squeezes all dimensions. The dimension index
2339      *      starts at 0. An error must be reported if squeezing a dimension that
2340      *      is not 1.
2341      *
2342      * Outputs:
2343      * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
2344      *      same data as input, but has one or more dimensions of size 1
2345      *      removed.
2346      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2347      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2348      *      the scale and zeroPoint must be the same as input0.
2349      *      If all input dimensions are equal to 1 and are to be squeezed, the
2350      *      output shape is [1].
2351      *
2352      * Available since NNAPI feature level 2.
2353      */
2354     ANEURALNETWORKS_SQUEEZE = 34,
2355 
2356     /**
2357      * Extracts a strided slice of a tensor.
2358      *
2359      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2360      * from the given input tensor. Starting at the location specified by begin
2361      * the slice continues by adding stride to the index until all dimensions
2362      * are not less than end. Note that a stride can be negative, which causes a
2363      * reverse slice.
2364      *
2365      * Supported tensor {@link OperandCode}:
2366      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2367      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2368      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2369      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2370      *
2371      * Supported tensor rank: up to 4
2372      *
2373      * Inputs:
2374      * * 0: An n-D tensor, specifying the tensor to be sliced.
2375      * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2376      *      starts of the dimensions of the input tensor to be sliced. The
2377      *      length must be of rank(input0).
2378      * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2379      *      ends of the dimensions of the input tensor to be sliced. The length
2380      *      must be of rank(input0).
2381      * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2382      *      strides of the dimensions of the input tensor to be sliced. The
2383      *      length must be of rank(input0). The entries must be non-zero.
2384      * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit
2385      *      of begin_mask is set, begin[i] is ignored and the fullest possible
2386      *      range in that dimension is used instead.
2387      * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of
2388      *      end_mask is set, end[i] is ignored and the fullest possible range in
2389      *      that dimension is used instead.
2390      * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the
2391      *      ith bit of shrink_axis_mask is set, the ith dimension specification
2392      *      shrinks the dimensionality by 1, taking on the value at index
2393      *      begin[i]. In this case, the ith specification must define a
2394      *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2395      *
2396      * Outputs:
2397      * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),
2398      *      where k is the number of bits set in shrink_axis_mask.
2399      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2400      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2401      *      the scale and zeroPoint must be the same as input0.
2402      *      If shrink_axis_mask is true for all input dimensions, the output
2403      *      shape is [1].
2404      *
2405      * Available since NNAPI feature level 2.
2406      */
2407     ANEURALNETWORKS_STRIDED_SLICE = 35,
2408 
2409     /**
2410      * Element-wise subtraction of two tensors.
2411      *
2412      * Takes two input tensors of identical {@link OperandCode} and compatible
2413      * dimensions. The output is the result of subtracting the second input
2414      * tensor from the first one, optionally modified by an activation function.
2415      *
2416      * Two dimensions are compatible when:
2417      *     1. they are equal, or
2418      *     2. one of them is 1
2419      *
2420      * The size of the output is the maximum size along each dimension of the
2421      * input operands. It starts with the trailing dimensions, and works its way
2422      * forward.
2423      *
2424      * Example:
2425      *     input1.dimension =    {4, 1, 2}
2426      *     input2.dimension = {5, 4, 3, 1}
2427      *     output.dimension = {5, 4, 3, 2}
2428      *
2429      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2430      * dimension is only compatible with 0 or 1. The size of the output
2431      * dimension is zero if either of corresponding input dimension is zero.
2432      *
2433      * Supported tensor {@link OperandCode}:
2434      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2435      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2436      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2437      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2438      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2439      *
2440      * Supported tensor rank: up to 4
2441      *
2442      * Inputs:
2443      * * 0: An n-D tensor, specifying the first input.
2444      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2445      *      as input0.
2446      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2447      *      {@link FuseCode} values. Specifies the activation to
2448      *      invoke on the result.
2449      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2450      *      the {@link FuseCode} must be "NONE".
2451      *
2452      * Outputs:
2453      * * 0: A tensor of the same {@link OperandCode} as input0.
2454      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2455      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2456      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2457      *
2458      * Available since NNAPI feature level 2.
2459      */
2460     ANEURALNETWORKS_SUB = 36,
2461 
2462     /**
2463      * Transposes the input tensor, permuting the dimensions according to the
2464      * perm tensor.
2465      *
2466      * The returned tensor's dimension i corresponds to the input dimension
2467      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2468      * rank of the input tensor. Hence by default, this operation performs a
2469      * regular matrix transpose on 2-D input Tensors.
2470      *
2471      * Supported tensor {@link OperandCode}:
2472      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2473      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2474      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2475      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2476      *
2477      * Supported tensor rank: up to 4
2478      *
2479      * Inputs:
2480      * * 0: An n-D tensor, specifying the tensor to be transposed.
2481      *      Since NNAPI feature level 3, this tensor may be zero-sized.
2482      * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
2483      *      the permutation of the dimensions of the input tensor.
2484      *
2485      * Outputs:
2486      * * 0: A tensor of the same {@link OperandCode} as input0.
2487      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2488      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2489      *      the scale and zeroPoint must be the same as input0.
2490      *
2491      * Available since NNAPI feature level 2.
2492      */
2493     ANEURALNETWORKS_TRANSPOSE = 37,
2494 
2495     // Operations below are available since NNAPI feature level 3.
2496 
2497     /**
2498      * Computes the absolute value of a tensor, element-wise.
2499      *
2500      * Supported tensor {@link OperandCode}:
2501      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2502      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2503      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2504      *
2505      * Supported tensor rank: from 1.
2506      *
2507      * Inputs:
2508      * * 0: A tensor.
2509      *
2510      * Outputs:
2511      * * 0: The output tensor of same shape as input0.
2512      *
2513      * Available since NNAPI feature level 3.
2514      */
2515     ANEURALNETWORKS_ABS = 38,
2516 
2517     /**
2518      * Returns the index of the largest element along an axis.
2519      *
2520      * Supported tensor {@link OperandCode}:
2521      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2522      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2523      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2524      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2525      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2526      *
2527      * Supported tensor rank: from 1
2528      *
2529      * Inputs:
2530      * * 0: An n-D tensor specifying the input. Must be non-empty.
2531      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2532      *      reduce across. Negative index is used to specify axis from the
2533      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2534      *
2535      * Outputs:
2536      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2537      *      If input is 1-dimensional, the output shape is [1].
2538      *
2539      * Available since NNAPI feature level 3.
2540      */
2541     // There is no underscore in ARG_MAX to avoid name conflict with
2542     // the macro defined in libc/kernel/uapi/linux/limits.h.
2543     ANEURALNETWORKS_ARGMAX = 39,
2544 
2545     /**
2546      * Returns the index of the smallest element along an axis.
2547      *
2548      * Supported tensor {@link OperandCode}:
2549      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2550      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2551      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2552      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2553      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2554      *
2555      * Supported tensor rank: from 1
2556      *
2557      * Inputs:
2558      * * 0: An n-D tensor specifying the input. Must be non-empty.
2559      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2560      *      reduce across. Negative index is used to specify axis from the
2561      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2562      *
2563      * Outputs:
2564      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2565      *      If input is 1-dimensional, the output shape is [1].
2566      *
2567      * Available since NNAPI feature level 3.
2568      */
2569     ANEURALNETWORKS_ARGMIN = 40,  // See ARGMAX for naming discussion.
2570 
2571     /**
2572      * Transform axis-aligned bounding box proposals using bounding box deltas.
2573      *
2574      * Given the positions of bounding box proposals and the corresponding
2575      * bounding box deltas for each class, return the refined bounding box
2576      * regions. The resulting bounding boxes are clipped against the edges of
2577      * the image.
2578      *
2579      * Supported tensor {@link OperandCode}:
2580      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2581      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2582      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
2583      *
2584      * Inputs:
2585      * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2586      *      bounding box proposals, each line with format [x1, y1, x2, y2].
2587      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
2588      *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2589      *      is supported for this tensor.
2590      * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2591      *      bounding box delta for each region of interest and each class. The
2592      *      bounding box deltas are organized in the following order
2593      *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2594      *      for the center position of the bounding box with respect to the width
2595      *      and height, dw and dh is the log-scale relative correction factor
2596      *      for the width and height. For input0 of type
2597      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
2598      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
2599      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2600      *      supported for this tensor.
2601      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
2602      *      [num_rois], specifying the batch index of each box. Boxes with
2603      *      the same batch index are grouped together. Zero num_rois is
2604      *      supported for this tensor.
2605      * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2606      *      each image in the batch, each line with format
2607      *      [image_height, image_width].
2608      *
2609      * Outputs:
2610      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
2611      *      [num_rois, num_classes * 4], specifying the coordinates of each
2612      *      output bounding box for each class, with format [x1, y1, x2, y2].
2613      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
2614      *      scale must be 0.125 and the zero point must be 0.
2615      *
2616      * Available since NNAPI feature level 3.
2617      */
2618     ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2619 
2620     /**
2621      * A recurrent neural network layer that applies an LSTM cell to a
2622      * sequence of inputs in forward and backward directions.
2623      *
2624      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2625      * one input into the two RNN cells in the following way:
2626      *
2627      *       INPUT  (INPUT_REVERSED)
2628      *         |         |
2629      *    ---------------------
2630      *    | FW_LSTM   BW_LSTM |
2631      *    ---------------------
2632      *         |         |
2633      *      FW_OUT     BW_OUT
2634      *
2635      * An op with cross-linking takes two inputs and feeds them into the RNN
2636      * cells in the following way:
2637      *
2638      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2639      *           |             |
2640      *     INPUT | (INPUT_R'D.)|
2641      *       |   |       |     |
2642      *    -----------------------
2643      *    |  \  /        \    / |
2644      *    | FW_LSTM     BW_LSTM |
2645      *    -----------------------
2646      *         |           |
2647      *      FW_OUT      BW_OUT
2648      *
2649      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2650      * weights are present. While stacking this op on top of itself, this
2651      * allows connecting both forward and backward outputs from the previous cell
2652      * to the next cell's input.
2653      *
2654      * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2655      * enabled if auxiliary input is present but auxiliary weights are omitted.
2656      * In this case, the cell feeds inputs into the RNN in the following way:
2657      *
2658      *       INPUT (AUX_INPUT_REVERSED)
2659      *         |         |
2660      *    ---------------------
2661      *    | FW_LSTM   BW_LSTM |
2662      *    ---------------------
2663      *         |         |
2664      *      FW_OUT     BW_OUT
2665      *
2666      * While stacking this op on top of itself, this allows connecting both
2667      * forward and backward outputs from previous cell to the next cell's
2668      * corresponding inputs.
2669      *
2670      * Supported tensor {@link OperandCode}:
2671      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2672      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2673      *
2674      * Supported tensor rank: 3, either time-major or batch-major.
2675      *
2676      * All input and output tensors must be of the same type.
2677      *
2678      * Inputs:
2679      * * 0: The input.
2680      *      A 3-D tensor of shape:
2681      *        If time-major: [max_time, batch_size, input_size]
2682      *        If batch-major: [batch_size, max_time, input_size]
2683      *      where "max_time" is the number of timesteps (sequence length),
2684      *      "batch_size" corresponds to the batching dimension, and
2685      *      "input_size" is the size of the input.
2686      * * 1: The forward input-to-input weights. Optional.
2687      *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2688      *      corresponds to the number of forward cell units.
2689      * * 2: The forward input-to-forget weights.
2690      *      A 2-D tensor of shape [fw_num_units, input_size].
2691      * * 3: The forward input-to-cell weights.
2692      *      A 2-D tensor of shape [fw_num_units, input_size].
2693      * * 4: The forward input-to-output weights.
2694      *      A 2-D tensor of shape [fw_num_units, input_size].
2695      * * 5: The forward recurrent-to-input weights. Optional.
2696      *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2697      *      corresponds to either the number of cell units (i.e., fw_num_units),
2698      *      or the second dimension of the “fw_projection_weights”, if defined.
2699      * * 6: The forward recurrent-to-forget weights.
2700      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2701      * * 7: The forward recurrent-to-cell weights.
2702      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2703      * * 8: The forward recurrent-to-output weights.
2704      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2705      * * 9: The forward cell-to-input weights. Optional.
2706      *      A 1-D tensor of shape [fw_num_units].
2707      * * 10: The forward cell-to-forget weights. Optional.
2708      *       A 1-D tensor of shape [fw_num_units].
2709      * * 11: The forward cell-to-output weights. Optional.
2710      *       A 1-D tensor of shape [fw_num_units].
2711      * * 12: The forward input gate bias. Optional.
2712      *       A 1-D tensor of shape [fw_num_units].
2713      * * 13: The forward forget gate bias.
2714      *       A 1-D tensor of shape [fw_num_units].
2715      * * 14: The forward cell gate bias.
2716      *       A 1-D tensor of shape [fw_num_units].
2717      * * 15: The forward output gate bias.
2718      *       A 1-D tensor of shape [fw_num_units].
2719      * * 16: The forward projection weights. Optional.
2720      *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2721      * * 17: The forward projection bias. Optional.
2722      *       A 1-D tensor of shape [fw_output_size].
2723      * * 18: The backward input-to-input weights. Optional.
2724      *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2725      *       corresponds to the number of backward cell units.
2726      * * 19: The backward input-to-forget weights.
2727      *       A 2-D tensor of shape [bw_num_units, input_size].
2728      * * 20: The backward input-to-cell weights.
2729      *       A 2-D tensor of shape [bw_num_units, input_size].
2730      * * 21: The backward input-to-output weights.
2731      *       A 2-D tensor of shape [bw_num_units, input_size].
2732      * * 22: The backward recurrent-to-input weights. Optional.
2733      *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2734      *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2735      *       or the second dimension of the “bw_projection_weights”, if defined.
2736      * * 23: The backward recurrent-to-forget weights.
2737      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2738      * * 24: The backward recurrent-to-cell weights.
2739      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2740      * * 25: The backward recurrent-to-output weights.
2741      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2742      * * 26: The backward cell-to-input weights. Optional.
2743      *       A 1-D tensor of shape [bw_num_units].
2744      * * 27: The backward cell-to-forget weights. Optional.
2745      *       A 1-D tensor of shape [bw_num_units].
2746      * * 28: The backward cell-to-output weights. Optional.
2747      *       A 1-D tensor of shape [bw_num_units].
2748      * * 29: The backward input gate bias. Optional.
2749      *       A 1-D tensor of shape [bw_num_units].
2750      * * 30: The backward forget gate bias.
2751      *       A 1-D tensor of shape [bw_num_units].
2752      * * 31: The backward cell gate bias.
2753      *       A 1-D tensor of shape [bw_num_units].
2754      * * 32: The backward output gate bias.
2755      *       A 1-D tensor of shape [bw_num_units].
2756      * * 33: The backward projection weights. Optional.
2757      *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2758      * * 34: The backward projection bias. Optional.
2759      *       A 1-D tensor of shape [bw_output_size].
2760      * * 35: The forward input activation state.
2761      *       A 2-D tensor of shape [batch_size, fw_output_size].
2762      * * 36: The forward input cell state.
2763      *       A 2-D tensor of shape [batch_size, fw_num_units].
2764      * * 37: The backward input activation state.
2765      *       A 2-D tensor of shape [batch_size, bw_output_size].
2766      * * 38: The backward input cell state.
2767      *       A 2-D tensor of shape [batch_size, bw_num_units].
2768      * * 39: The auxiliary input. Optional.
2769      *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2770      *       where “batch_size” corresponds to the batching dimension, and
2771      *       “aux_input_size” is the size of the auxiliary input. Optional. See
2772      *       the docs above for the usage modes explanation.
2773      * * 40: The forward auxiliary input-to-input weights.
2774      *       Optional. See the docs above for the usage modes explanation.
2775      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2776      * * 41: The forward auxiliary input-to-forget weights.
2777      *       Optional. See the docs above for the usage modes explanation.
2778      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2779      * * 42: The forward auxiliary input-to-cell weights.
2780      *       Optional. See the docs above for the usage modes explanation.
2781      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2782      * * 43: The forward auxiliary input-to-output weights.
2783      *       Optional. See the docs above for the usage modes explanation.
2784      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2785      * * 44: The backward auxiliary input-to-input weights.
2786      *       Optional. See the docs above for the usage modes explanation.
2787      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2788      * * 45: The backward auxiliary input-to-forget weights.
2789      *       Optional. See the docs above for the usage modes explanation.
2790      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2791      * * 46: The backward auxiliary input-to-cell weights.
2792      *       Optional. See the docs above for the usage modes explanation.
2793      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2794      * * 47: The backward auxiliary input-to-output weights.
2795      *       Optional. See the docs above for the usage modes explanation.
2796      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2797      * * 48: The activation function.
2798      *       A value indicating the activation function:
2799      *       <ul>
2800      *       <li>0: None;
2801      *       <li>1: Relu;
2802      *       <li>3: Relu6;
2803      *       <li>4: Tanh;
2804      *       <li>6: Sigmoid.
2805      *       </ul>
2806      * * 49: The clipping threshold for the cell state, such
2807      *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2808      *       then clipping is disabled.
2809      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2810      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2811      *       otherwise if all the input tensors have the type
2812      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2813      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2814      * * 50: The clipping threshold for the output from the
2815      *       projection layer, such that values are bound within
2816      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2817      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2818      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2819      *       otherwise if all the input tensors have the type
2820      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2821      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2822      * * 51: merge_outputs
2823      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
2824      *       from forward and backward cells should be merged.
2825      * * 52: time_major
2826      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
2827      *       of input and output tensors.
2828      * * 53: The forward input layer normalization weights. Optional.
2829      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2830      *       to activation at input gate.
2831      * * 54: The forward forget layer normalization weights. Optional.
2832      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2833      *       to activation at forget gate.
2834      * * 55: The forward cell layer normalization weights. Optional.
2835      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2836      *       to activation at cell gate.
2837      * * 56: The forward output layer normalization weights. Optional.
2838      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2839      *       to activation at output gate.
2840      * * 57: The backward input layer normalization weights. Optional.
2841      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2842      *       to activation at input gate.
2843      * * 58: The backward forget layer normalization weights. Optional.
2844      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2845      *       to activation at forget gate.
2846      * * 59: The backward cell layer normalization weights. Optional.
2847      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2848      *       to activation at cell gate.
2849      * * 60: The backward output layer normalization weights. Optional.
2850      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2851      *       to activation at output gate.
2852      *
2853      * Outputs:
2854      * * 0: The forward output.
2855      *      A 3-D tensor of shape:
2856      *        If time-major and not merge_outputs:
2857      *          [max_time, batch_size, fw_output_size]
2858      *        If time-major and merge_outputs:
2859      *          [max_time, batch_size, fw_output_size + bw_output_size]
2860      *        If batch-major and not merge_outputs:
2861      *          [batch_size, max_time, fw_output_size]
2862      *        If batch-major and merge_outputs:
2863      *          [batch_size, max_time, fw_output_size + bw_output_size]
2864      * * 1: The backward output.  Unused if merge_outputs is true.
2865      *      A 3-D tensor of shape:
2866      *        If time-major: [max_time, batch_size, bw_output_size]
2867      *        If batch-major: [batch_size, max_time, bw_output_size]
2868      * * 2: The forward activation state output.
2869      *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2870      *      activation state from the last time step in the sequence. This
2871      *      output is optional and can be omitted. If this output is present
2872      *      then outputs 3-5 must be present as well.
2873      *      Available since NNAPI feature level 4.
2874      * * 3: The forward cell state output.
2875      *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2876      *      from the last time step in the sequence. This output is optional
2877      *      and can be omitted. If this output is present
2878      *      then outputs 2, 4, 5 must be present as well.
2879      *      Available since NNAPI feature level 4.
2880      * * 4: The backward activation state output.
2881      *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2882      *      activation state from the last time step in the sequence. This
2883      *      output is optional and can be omitted. If this output is present
2884      *      then outputs 2, 3, 5 must be present as well.
2885      *      Available since NNAPI feature level 4.
2886      * * 5: The backward cell state output.
2887      *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2888      *      from the last time step in the sequence. This output is optional
2889      *      and can be omitted. If this output is present
2890      *      then outputs 2-4 must be present as well.
2891      *      Available since NNAPI feature level 4.
2892      *
2893      * Available since NNAPI feature level 3.
2894      *
2895      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
2896      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
2897      * in which multiple cells are chained and state tensors are propagated.
2898      */
2899     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2900 
2901     /**
2902      * A recurrent neural network layer that applies a basic RNN cell to a
2903      * sequence of inputs in forward and backward directions.
2904      *
2905      * This Op unrolls the input along the sequence dimension, and implements
2906      * the following operation for each element in the sequence s =
2907      * 1...sequence_length:
2908      *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2909      *          fw_state * fw_recurrent_weights’ + fw_bias)
2910      *
2911      * And for each element in sequence t = sequence_length : 1
2912      *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2913      *          bw_state * bw_recurrent_weights’ + bw_bias)
2914      *
2915      * Where:
2916      * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2917      * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2918      *    current “state” which itself is the output from the previous time step
2919      *    computation;
2920      * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2921      *    batch);
2922      * * “activation” is the function passed as the “fused_activation_function”
2923      *   argument (if not “NONE”).
2924      *
2925      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2926      * one input into the two RNN cells in the following way:
2927      *
2928      *       INPUT  (INPUT_REVERSED)
2929      *         |         |
2930      *    ---------------------
2931      *    | FW_RNN     BW_RNN |
2932      *    ---------------------
2933      *         |         |
2934      *      FW_OUT     BW_OUT
2935      *
2936      * An op with cross-linking takes two inputs and feeds them into the RNN
2937      * cells in the following way:
2938      *
2939      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2940      *           |             |
2941      *     INPUT | (INPUT_R'D.)|
2942      *       |   |       |     |
2943      *    -----------------------
2944      *    |  \  /        \    / |
2945      *    | FW_RNN       BW_RNN |
2946      *    -----------------------
2947      *         |           |
2948      *      FW_OUT      BW_OUT
2949      *
2950      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2951      * weights are present. While stacking this op on top of itself, this
2952      * allows connecting both forward and backward outputs from the previous cell
2953      * to the next cell's input.
2954      *
2955      * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2956      * enabled if auxiliary input is present but auxiliary weights are omitted.
2957      * In this case, the cell feeds inputs into the RNN in the following way:
2958      *
2959      *       INPUT (AUX_INPUT_REVERSED)
2960      *         |         |
2961      *    ---------------------
2962      *    | FW_RNN     BW_RNN |
2963      *    ---------------------
2964      *         |         |
2965      *      FW_OUT     BW_OUT
2966      *
2967      * While stacking this op on top of itself, this allows connecting both
2968      * forward and backward outputs from previous cell to the next cell's
2969      * corresponding inputs.
2970      *
2971      * Supported tensor {@link OperandCode}:
2972      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2973      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2974      *
2975      * The input tensors must all be the same type.
2976      *
2977      * Inputs:
2978      * * 0: input.
2979      *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
2980      *      it is set to true, then the input has a shape [maxTime, batchSize,
2981      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2982      *      inputSize].
2983      * * 1: fwWeights.
2984      *      A 2-D tensor of shape [fwNumUnits, inputSize].
2985      * * 2: fwRecurrentWeights.
2986      *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2987      * * 3: fwBias.
2988      *      A 1-D tensor of shape [fwNumUnits].
2989      * * 4: fwHiddenState.
2990      *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2991      *      state input for the first time step of the computation.
2992      * * 5: bwWeights.
2993      *      A 2-D tensor of shape [bwNumUnits, inputSize].
2994      * * 6: bwRecurrentWeights.
2995      *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2996      * * 7: bwBias.
2997      *      A 1-D tensor of shape [bwNumUnits].
2998      * * 8: bwHiddenState
2999      *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
3000      *      state input for the first time step of the computation.
3001      * * 9: auxInput.
3002      *      A 3-D tensor. The shape is defined by the input 13 (timeMajor). If
3003      *      it is set to true, then the input has a shape [maxTime, batchSize,
3004      *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
3005      *      auxInputSize]. Can be omitted. See the docs above for the usage
3006      *      modes explanation.
3007      * * 10: fwAuxWeights.
3008      *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
3009      *      See the docs above for the usage modes explanation.
3010      * * 11: bwAuxWeights.
3011      *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
3012      *      See the docs above for the usage modes explanation.
3013      * * 12: fusedActivationFunction.
3014      *      A {@link FuseCode} value indicating the activation function. If
3015      *      “NONE” is specified then it results in a linear activation.
3016      * * 13: timeMajor
3017      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
3018      *      of input and output tensors.
3019      * * 14: mergeOutputs
3020      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
3021      *      from forward and backward cells are separate (if set to false) or
3022      *      concatenated (if set to true).
3023      * Outputs:
3024      * * 0: fwOutput.
3025      *      A 3-D tensor. The first two dimensions of the shape are defined by
3026      *      the input 13 (timeMajor) and the third dimension is defined by the
3027      *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
3028      *      two dimensions are [maxTime, batchSize], otherwise they are set to
3029      *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
3030      *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
3031      *      to fwNumUnits.
3032      * * 1: bwOutput.
3033      *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
3034      *      this tensor is not produced. The shape is defined by the input 13
3035      *      (timeMajor). If it is set to true, then the shape is set to
3036      *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
3037      *      [batchSize, maxTime, bwNumUnits].
3038      * * 2: The forward hidden state output.
3039      *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
3040      *      state from the last time step in the sequence. This output is
3041      *      optional and can be omitted. If this output is present then output
3042      *      3 must be present as well.
3043      *      Available since NNAPI feature level 4.
3044      * * 3: The backward hidden state output.
3045      *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
3046      *      state from the last time step in the sequence. This output is
3047      *      optional and can be omitted. If this output is present then output
3048      *      2 must be present as well.
3049      *      Available since NNAPI feature level 4.
3050      *
3051      * Available since NNAPI feature level 3.
3052      *
3053      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
3054      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
3055      * in which multiple cells are chained and state tensors are propagated.
3056      */
3057     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
3058 
3059     /**
3060      * Greedily selects a subset of bounding boxes in descending order of score.
3061      *
3062      * This op applies NMS algorithm to each class. In each loop of execution,
3063      * the box with maximum score gets selected and removed from the pending set.
3064      * The scores of the rest of boxes are lowered according to the
3065      * intersection-over-union (IOU) overlapping with the previously selected
3066      * boxes and a specified NMS kernel method. Any boxes with score less
3067      * than a threshold are removed from the pending set.
3068      *
3069      * Three NMS kernels are supported:
3070      * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
3071      * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
3072      * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
3073      *
3074      * Axis-aligned bounding boxes are represented by its upper-left corner
3075      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3076      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3077      *
3078      * Supported tensor {@link OperandCode}:
3079      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3080      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3081      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3082      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3083      *
3084      * Inputs:
3085      * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
3086      *      of each bounding box proposal. The boxes are grouped by batches in the
3087      *      first dimension. Zero num_rois is supported for this tensor.
3088      * * 1: A 2-D Tensor specifying the bounding boxes of shape
3089      *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
3090      *      The boxes are grouped by batches in the first dimension. The sequential
3091      *      order of the boxes corresponds with input0. For input0 of type
3092      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
3093      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
3094      *      scale of 0.125.
3095      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3096      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
3097      *      with zeroPoint of -128 and scale of 0.125.
3098      *      Zero num_rois is supported for this tensor.
3099      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3100      *      [num_rois], specifying the batch index of each box. Boxes with
3101      *      the same batch index are grouped together.
3102      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
3103      *      with scores lower than the threshold are filtered before sending
3104      *      to the NMS algorithm.
3105      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3106      *      number of selected bounding boxes for each image. Set to a negative
3107      *      value for unlimited number of output bounding boxes.
3108      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
3109      *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
3110      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3111      *      threshold in hard and linear NMS kernel. This field is ignored if
3112      *      gaussian kernel is selected.
3113      * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
3114      *      gaussian NMS kernel. This field is ignored if gaussian kernel is
3115      *      not selected.
3116      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
3117      *      Boxes with scores lower than the threshold are dropped during the
3118      *      score updating phase in soft NMS.
3119      *
3120      * Outputs:
3121      * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
3122      *      [num_output_rois], specifying the score of each output box. The boxes
3123      *      are grouped by batches, but the sequential order in each batch is not
3124      *      guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3126      *      or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3127      *      the scale and zero point must be the same as input0.
3128      * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
3129      *      [num_output_rois, 4], specifying the coordinates of each
3130      *      output bounding box with the same format as input1. The sequential
3131      *      order of the boxes corresponds with output0. For type of
3132      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
3133      *      0.125 and the zero point must be 0.
3134      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3135      *      [num_output_rois], specifying the class of each output box. The
3136      *      sequential order of the boxes corresponds with output0.
3137      * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3138      *      [num_output_rois], specifying the batch index of each box. Boxes
3139      *      with the same batch index are grouped together.
3140      *
3141      * Available since NNAPI feature level 3.
3142      */
3143     ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
3144 
3145     /**
3146      * Casts a tensor to a type.
3147      *
3148      * This operation ignores the scale and zeroPoint of quantized tensors,
3149      * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
3150      * as a tensor of uint8 values.
3151      *
3152      * Supported tensor {@link OperandCode}:
3153      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3154      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3155      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3156      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3157      * Since NNAPI feature level 4, casting tensors of the following
3158      * {@link OperandCode} to the same {@link OperandCode} is supported:
3159      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3160      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3161      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
3162      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
3163      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3164      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
3165      *
3166      * Supported tensor rank: from 1
3167      *
3168      * Inputs:
3169      * * 0: A tensor.
3170      *
3171      * Outputs:
3172      * * 0: A tensor with the same shape as input0.
3173      *
3174      * Available since NNAPI feature level 3.
3175      */
3176     ANEURALNETWORKS_CAST = 45,
3177 
3178     /**
3179      * Shuffle the channels of the input tensor.
3180      *
3181      * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
3182      * divides the channel dimension into num_groups groups, and reorganizes the
3183      * channels by grouping channels with the same index in each group.
3184      *
3185      * Along the channel dimension, the output is calculated using this formula:
3186      *
3187      *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
3188      *
3189      * where group_size = num_channels / num_groups
3190      *
3191      * The number of channels must be divisible by num_groups.
3192      *
3193      * Supported tensor {@link OperandCode}:
3194      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3195      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3196      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3197      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3198      *
3199      * Supported tensor rank: up to 4
3200      *
3201      * Inputs:
3202      * * 0: An n-D tensor, specifying the tensor to be shuffled.
3203      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3204      *      groups.
3205      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
3206      *      channel shuffle would be performed on. Negative index is used to
3207      *      specify axis from the end (e.g. -1 for the last axis). Must be in
3208      *      the range [-n, n).
3209      *
3210      * Outputs:
3211      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3212      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3213      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3214      *      the scale and zeroPoint must be the same as input0.
3215      *
3216      * Available since NNAPI feature level 3.
3217      */
3218     ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
3219 
3220     /**
3221      * Apply postprocessing steps to bounding box detections.
3222      *
3223      * Bounding box detections are generated by applying transformation on a set
3224      * of predefined anchors with the bounding box deltas from bounding box
3225      * regression. A final step of hard NMS is applied to limit the number of
3226      * returned boxes.
3227      *
3228      * Supported tensor {@link OperandCode}:
3229      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3230      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3231      *
3232      * Inputs:
3233      * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
3234      *      the score of each anchor with each class. Class 0 for each
3235      *      [batches, num_anchors, 0] is background and will be ignored.
3236      * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
3237      *      the first four values in length_box_encoding specifying the bounding
3238      *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
3239      *      where dy and dx is the linear-scale relative correction factor for the
3240      *      center position of the bounding box with respect to the width and height,
3241      *      dh and dw is the log-scale relative correction factor for the width and
3242      *      height. All the entries in length_box_encoding beyond the first four
3243      *      values are ignored in this operation.
3244      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3245      *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
3246      *      ctr_x are the center position of the box, and h and w are the height
3247      *      and the width.
3248      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3249      *      factor for dy in bounding box deltas.
3250      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3251      *      factor for dx in bounding box deltas.
3252      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3253      *      factor for dh in bounding box deltas.
3254      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3255      *      factor for dw in bounding box deltas.
3256      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular
3257      *      multi-class NMS algorithm that does NMS separately for each class,
3258      *      set to false for a faster algorithm that only does one single NMS
3259      *      using the highest class score.
3260      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
3261      *      the maximum number of boxes for the output. Boxes with the lowest
3262      *      scores are discarded to meet the limit.
3263      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3264      *      set to false, specifying the maximum number of classes per detection.
3265      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3266      *       set to true, specifying the maximum number of detections when
3267      *       applying NMS algorithm for each single class.
3268      * * 11: A scalar, score_threshold. Boxes with scores lower than the
3269      *       threshold are filtered before sending to the NMS algorithm. The
3270      *       scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3271      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3272      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3273      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3274      * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
3275      *       must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3276      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3277      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3278      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3279      * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
3280      *       background class in the list of label map for the output, set
3281      *       to false to not include the background. When the background
3282      *       class is included, it has label 0 and the output classes start
3283      *       at 1 in the label map, otherwise, the output classes start at 0.
3284      *
3285      * Outputs:
3286      * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
3287      *      [batches, max_num_detections], specifying the score of each output
3288      *      detections.
3289      * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
3290      *      coordinates of each output bounding box, with format
3291      *      [y1, x1, y2, x2].
3292      * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3293      *      [batches, max_num_detections], specifying the class label for each
3294      *      output detection.
3295      * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
3296      *      specifying the number of valid output detections for each batch.
3297      *
3298      * Available since NNAPI feature level 3.
3299      */
3300     ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
3301 
3302     /**
3303      * For input tensors x and y, computes x == y elementwise.
3304      *
3305      * Supported tensor {@link OperandCode}:
3306      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3307      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3308      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3309      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3310      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3311      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3312      *
3313      * Supported tensor rank: from 1
3314      *
3315      * This operation supports broadcasting.
3316      *
3317      * Inputs:
3318      * * 0: A tensor.
3319      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3320      *      with input0.
3321      *
3322      * Outputs:
3323      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3324      *
3325      * Available since NNAPI feature level 3.
3326      */
3327     ANEURALNETWORKS_EQUAL = 48,
3328 
3329     /**
3330      * Computes exponential of x element-wise.
3331      *
3332      * Supported tensor {@link OperandCode}:
3333      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3334      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3335      *
3336      * Supported tensor rank: from 1.
3337      *
3338      * Inputs:
3339      * * 0: A tensor.
3340      *
3341      * Outputs:
3342      * * 0: The output tensor of same shape as input0.
3343      *
3344      * Available since NNAPI feature level 3.
3345      */
3346     ANEURALNETWORKS_EXP = 49,
3347 
3348     /**
3349      * Inserts a dimension of 1 into a tensor's shape.
3350      *
3351      * Given a tensor input, this operation inserts a dimension of 1 at the
3352      * given dimension index of input's shape. The dimension index starts at
3353      * zero; if you specify a negative dimension index, it is counted backward
3354      * from the end.
3355      *
3356      * Supported tensor {@link OperandCode}:
3357      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3358      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3359      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3360      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3361      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3362      *
3363      * Supported tensor rank: from 1
3364      *
3365      * Inputs:
3366      * * 0: An n-D tensor.
3367      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension
3368      *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3369      *
3370      * Outputs:
3371      * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as
3372      *      input0.
3373      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3374      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3375      *      the scale and zeroPoint must be the same as input0.
3376      *
3377      * Available since NNAPI feature level 3.
3378      */
3379     ANEURALNETWORKS_EXPAND_DIMS = 50,
3380 
3381     /**
3382      * Gathers values along an axis.
3383      *
3384      * Produces an output tensor with shape
3385      *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3386      * where:
3387      *     # Vector indices (output is rank(input0)).
3388      *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3389      *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3390      *
3391      *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3392      *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3393      *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3394      *
3395      * Supported tensor {@link OperandCode}:
3396      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3397      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3398      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3399      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3400      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3401      *
3402      * Supported tensor rank: from 1
3403      *
3404      * Inputs:
3405      * * 0: An n-D tensor from which to gather values.
3406      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.
3407      *      Negative index is used to specify axis from the end
3408      *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3409      * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.
3410      *      The values must be in the bounds of the corresponding dimensions
3411      *      of input0.
3412      *
3413      * Outputs:
3414      * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.
3415      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3416      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3417      *      the scale and zeroPoint must be the same as input0.
3418      *
3419      * Available since NNAPI feature level 3.
3420      */
3421     ANEURALNETWORKS_GATHER = 51,
3422 
3423     /**
3424      * Generates axis-aligned bounding box proposals.
3425      *
3426      * Bounding box proposals are generated by applying transformation on a set
3427      * of predefined anchors with the bounding box deltas from bounding box
3428      * regression. A final step of hard NMS is applied to limit the number of
3429      * returned boxes.
3430      *
3431      * Axis-aligned bounding boxes are represented by its upper-left corner
3432      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3433      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3434      *
3435      * Supported tensor {@link OperandCode}:
3436      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3437      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3438      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3439      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3440      *
3441      * Inputs:
3442      * * 0: A 4-D Tensor specifying the score of each anchor at each
3443      *      location. With "NHWC" data layout, the tensor shape is
3444      *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3445      *      the tensor shape is [batches, num_anchors, height, width].
3446      * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3447      *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3448      *      With "NCHW" data layout, the tensor shape is
3449      *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3450      *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3451      *      relative correction factor for the center position of the bounding box
3452      *      with respect to the width and height, dw and dh is the log-scale
3453      *      relative correction factor for the width and height. The last
3454      *      dimension is the channel dimension.
3455      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3456      *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3457      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3458      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3459      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
3460      * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3461      *      each image in the batch, with format [image_height, image_width].
3462      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3463      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this
3464      *      tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
3465      *      scale of 0.125.
3466      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3467      *      from the height of original image to the height of feature map.
3468      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3469      *      from the width of original image to the width of feature map.
3470      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3471      *      number of boxes before going into the hard NMS algorithm. Boxes
3472      *      with the lowest scores are discarded to meet the limit. Set to
3473      *      a non-positive value for unlimited number.
3474      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3475      *      number of boxes returning from the hard NMS algorithm. Boxes
3476      *      with the lowest scores are discarded to meet the limit. Set to
3477      *      a non-positive value for unlimited number.
3478      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3479      *      threshold for hard NMS.
3480      * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with
3481      *      height or width lower than the absolute threshold are filtered out.
3482      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3483      *       NCHW data layout for input0 and input1. Set to false for NHWC.
3484      *
3485      * Outputs:
3486      * * 0: A tensor of the same {@link OperandCode} as input0, of shape
3487      *      [num_output_rois], specifying the score of each output box.
3488      *      The boxes are grouped by batches, but the sequential order in
3489      *      each batch is not guaranteed. For type of
3490      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3491      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3492      *      point must be the same as input0.
3493      * * 1: A tensor of the same {@link OperandCode} as input3, of shape
3494      *      [num_output_rois, 4], specifying the coordinates of each output
3495      *      bounding box for each class, with format [x1, y1, x2, y2].
3496      *      The sequential order of the boxes corresponds with output0.
3497      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3498      *      scale must be 0.125 and the zero point must be 0.
3499      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3500      *      [num_output_rois], specifying the batch index of each box. Boxes
3501      *      with the same batch index are grouped together.
3502      *
3503      * Available since NNAPI feature level 3.
3504      */
3505     ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
3506 
3507     /**
3508      * For input tensors x and y, computes x > y elementwise.
3509      *
3510      * Supported tensor {@link OperandCode}:
3511      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3512      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3513      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3514      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3515      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3516      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3517      *
3518      * Supported tensor rank: from 1
3519      *
3520      * This operation supports broadcasting.
3521      *
3522      * Inputs:
3523      * * 0: A tensor.
3524      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3525      *      with input0.
3526      *
3527      * Outputs:
3528      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3529      *
3530      * Available since NNAPI feature level 3.
3531      */
3532     ANEURALNETWORKS_GREATER = 53,
3533     /**
3534      * For input tensors x and y, computes x >= y elementwise.
3535      *
3536      * Supported tensor {@link OperandCode}:
3537      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3538      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3539      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3540      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3541      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3542      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3543      *
3544      * Supported tensor rank: from 1
3545      *
3546      * This operation supports broadcasting.
3547      *
3548      * Inputs:
3549      * * 0: A tensor.
3550      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3551      *      with input0.
3552      *
3553      * Outputs:
3554      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3555      *
3556      * Available since NNAPI feature level 3.
3557      */
3558     ANEURALNETWORKS_GREATER_EQUAL = 54,
3559 
3560     /**
3561      * Performs a grouped 2-D convolution operation.
3562      *
3563      * Given an input tensor of shape [batches, height, width, depth_in] and a
3564      * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3565      * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3566      * applies a group of different filters to each input channel group, then
3567      * concatenates the results together.
3568      *
3569      * Specifically, the input channels are divided into num_groups groups, each with
3570      * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3571      * filters are also divided into num_groups groups, i.e. depth_out is divisible
3572      * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3573      *      input channel group, and the results are concatenated together.
3574      *
3575      * The output dimensions are functions of the filter dimensions, stride, and
3576      * padding.
3577      *
3578      * The values in the output tensor are computed as:
3579      *
3580      *     output[b, i, j, g * channel_multiplier + q] =
3581      *         sum_{di, dj, dk} (
3582      *             input[b, strides[1] * i + di, strides[2] * j + dj,
3583      *                   g * depth_group + dk] *
3584      *             filter[g * channel_multiplier + q, di, dj, dk]
3585      *         ) + bias[channel]
3586      *
3587      * where channel_multiplier = depth_out / num_groups
3588      *
3589      * Supported tensor {@link OperandCode} configurations:
3590      * * 16 bit floating point:
3591      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
3592      *
3593      * * 32 bit floating point:
3594      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
3595      *
3596      * * Quantized:
3597      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
3598      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3599      * * * input.scale * filter.scale).
3600      *
3601      * * Quantized signed (since NNAPI feature level 4):
3602      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3603      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3604      * * * input.scale * filter.scale).
3605      *
3606      * * Quantized with symmetric per channel quantization for the filter:
3607      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
3608      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3609      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3610      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3611      *
3612      * * Quantized signed with filter symmetric per channel quantization
3613      *   (since NNAPI feature level 4):
3614      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3615      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3616      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3617      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3618      *
3619      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3620      * With the default data layout NHWC, the data is stored in the order of:
3621      * [batch, height, width, channels]. Alternatively, the data layout could
3622      * be NCHW, the data storage order of: [batch, channels, height, width].
3623      *
3624      * Both explicit padding and implicit padding are supported.
3625      *
3626      * Inputs (explicit padding):
3627      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3628      *      specifying the input, where depth_in = num_groups * depth_group.
3629      * * 1: A 4-D tensor, of shape
3630      *      [depth_out, filter_height, filter_width, depth_group], specifying
3631      *      the filter, where depth_out must be divisible by num_groups.  For
3632      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3633      *      the channel dimension (channelDim at
3634      *      {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
3635      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3636      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3637      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3638      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3639      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3640      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3641      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3642      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3643      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3644      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3645      *      bias_scale[i] = input_scale * filter_scale[i].
3646      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3647      *      the left, in the ‘width’ dimension.
3648      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3649      *      the right, in the ‘width’ dimension.
3650      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3651      *      the top, in the ‘height’ dimension.
3652      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3653      *      the bottom, in the ‘height’ dimension.
3654      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3655      *      walking through input in the ‘width’ dimension.
3656      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3657      *      walking through input in the ‘height’ dimension.
3658      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3659      *      groups.
3660      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3661      *       {@link FuseCode} values. Specifies the activation to
3662      *       invoke on the result.
3663      * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3664      *       NCHW data layout for input0 and output0. Set to false for NHWC.
3665      *
3666      * Inputs (implicit padding):
3667      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3668      *      specifying the input, where depth_in = num_groups * depth_group.
3669      * * 1: A 4-D tensor, of shape
3670      *      [depth_out, filter_height, filter_width, depth_group], specifying
3671      *      the filter, where depth_out must be divisible by num_groups.  For
3672      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3673      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
3674      *      must be set to 0.
3675      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3676      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3677      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3679      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3680      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3681      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3682      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3683      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3684      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3685      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3686      *      bias_scale[i] = input_scale * filter_scale[i].
3687      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
3688      *      padding scheme, has to be one of the
3689      *      {@link PaddingCode} values.
3690      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3691      *      walking through input in the ‘width’ dimension.
3692      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3693      *      walking through input in the ‘height’ dimension.
3694      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3695      *      groups.
3696      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3697      *      {@link FuseCode} values. Specifies the activation to
3698      *      invoke on the result.
3699      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3700      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3701      *
3702      * Outputs:
3703      * * 0: The output 4-D tensor, of shape
3704      *      [batches, out_height, out_width, depth_out].
3705      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3706      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3707      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3708      *
3709      * Available since NNAPI feature level 3.
3710      */
3711     ANEURALNETWORKS_GROUPED_CONV_2D = 55,
3712 
3713     /**
3714      * Localize the maximum keypoints from heatmaps.
3715      *
3716      * This operation approximates the accurate maximum keypoint scores and
3717      * indices after bicubic upscaling by using Taylor expansion up to the
3718      * quadratic term.
3719      *
3720      * The bounding box is represented by its upper-left corner coordinate
3721      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3722      * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3723      *
3724      * Supported tensor {@link OperandCode}:
3725      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3726      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3727      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3728      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3729      *
3730      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3731      * With the default data layout NHWC, the data is stored in the order of:
3732      * [batch, height, width, channels]. Alternatively, the data layout could
3733      * be NCHW, the data storage order of: [batch, channels, height, width].
3734      *
3735      * Inputs:
3736      * * 0: A 4-D Tensor of shape
3737      *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3738      *      specifying the heatmaps, the height and width of heatmaps should
3739      *      be the same, and must be greater than or equal to 2.
3740      * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3741      *      each with format [x1, y1, x2, y2]. For input0 of type
3742      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should
3743      *      be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint
3744      *      of 0 and scale of 0.125.
3745      *      For input0 of type
3746      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3747      *      should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with
3748      *      zeroPoint of -128 and scale of 0.125.
3749      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3750      *      NCHW data layout for input0. Set to false for NHWC.
3751      *
3752      * Outputs:
3753      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
3754      *      [num_boxes, num_keypoints], specifying score of the keypoints.
3755      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3756      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3757      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3758      * * 1: A tensor of the same {@link OperandCode} as input1, with shape
3759      *      [num_boxes, num_keypoints, 2], specifying the location of
3760      *      the keypoints, the second dimension is organized as
3761      *      [keypoint_x, keypoint_y].
3762      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3763      *      scale must be 0.125 and the zero point must be 0.
3764      *
3765      * Available since NNAPI feature level 3.
3766      */
3767     ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
3768 
3769     /**
3770      * Applies instance normalization to the input tensor.
3771      *
3772      * The values in the output tensor are computed as:
3773      *
3774      *     output[b, h, w, c] =
3775      *         (input[b, h, w, c] - mean[b, c]) * gamma /
3776      *         sqrt(var[b, c] + epsilon) + beta
3777      *
3778      * Where the mean and variance are computed across the spatial dimensions:
3779      *
3780      *     mean[b, c] =
3781      *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3782      *
3783      *     var[b, c] =
3784      *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3785      *
3786      * Supported tensor {@link OperandCode}:
3787      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3788      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3789      *
3790      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3791      * With the default data layout NHWC, the data is stored in the order of:
3792      * [batch, height, width, channels]. Alternatively, the data layout could
3793      * be NCHW, the data storage order of: [batch, channels, height, width].
3794      *
3795      * Inputs:
3796      * * 0: An n-D tensor, specifying the tensor to be normalized.
3797      * * 1: A scalar, specifying gamma, the scale applied to the normalized
3798      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3799      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3800      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3801      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3802      * * 2: A scalar, specifying beta, the offset applied to the normalized
3803      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3804      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3805      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3806      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3807      * * 3: A scalar, specifying epsilon, the small value added to variance to
3808      *      avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3809      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3810      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3811      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3812      * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3813      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3814      *
3815      * Outputs:
3816      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3817      *
3818      * Available since NNAPI feature level 3.
3819      */
3820     ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
3821 
3822     /**
3823      * For input tensors x and y, computes x < y elementwise.
3824      *
3825      * Supported tensor {@link OperandCode}:
3826      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3827      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3828      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3829      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3830      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3831      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3832      *
3833      * Supported tensor rank: from 1
3834      *
3835      * This operation supports broadcasting.
3836      *
3837      * Inputs:
3838      * * 0: A tensor.
3839      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3840      *      with input0.
3841      *
3842      * Outputs:
3843      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3844      *
3845      * Available since NNAPI feature level 3.
3846      */
3847     ANEURALNETWORKS_LESS = 58,
3848 
3849     /**
3850      * For input tensors x and y, computes x <= y elementwise.
3851      *
3852      * Supported tensor {@link OperandCode}:
3853      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3854      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3855      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3856      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3857      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3858      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3859      *
3860      * Supported tensor rank: from 1
3861      *
3862      * This operation supports broadcasting.
3863      *
3864      * Inputs:
3865      * * 0: A tensor.
3866      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3867      *      with input0.
3868      *
3869      * Outputs:
3870      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3871      *
3872      * Available since NNAPI feature level 3.
3873      */
3874     ANEURALNETWORKS_LESS_EQUAL = 59,
3875 
3876     /**
3877      * Computes natural logarithm of x element-wise.
3878      *
3879      * Supported tensor {@link OperandCode}:
3880      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3881      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3882      *
3883      * Supported tensor rank: from 1.
3884      *
3885      * Inputs:
3886      * * 0: A tensor.
3887      *
3888      * Outputs:
3889      * * 0: The output tensor of same shape as input0.
3890      *
3891      * Available since NNAPI feature level 3.
3892      */
3893     ANEURALNETWORKS_LOG = 60,
3894 
3895     /**
3896      * Returns the truth value of x AND y element-wise.
3897      *
3898      * Supported tensor {@link OperandCode}:
3899      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3900      *
3901      * Supported tensor rank: from 1
3902      *
3903      * This operation supports broadcasting.
3904      *
3905      * Inputs:
3906      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3907      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3908      *      compatible with input0.
3909      *
3910      * Outputs:
3911      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3912      *
3913      * Available since NNAPI feature level 3.
3914      */
3915     ANEURALNETWORKS_LOGICAL_AND = 61,
3916 
3917     /**
3918      * Computes the truth value of NOT x element-wise.
3919      *
3920      * Supported tensor {@link OperandCode}:
3921      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3922      *
3923      * Supported tensor rank: from 1.
3924      *
3925      * Inputs:
3926      * * 0: A tensor.
3927      *
3928      * Outputs:
3929      * * 0: The output tensor of same shape as input0.
3930      *
3931      * Available since NNAPI feature level 3.
3932      */
3933     ANEURALNETWORKS_LOGICAL_NOT = 62,
3934 
3935     /**
3936      * Returns the truth value of x OR y element-wise.
3937      *
3938      * Supported tensor {@link OperandCode}:
3939      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3940      *
3941      * Supported tensor rank: from 1
3942      *
3943      * This operation supports broadcasting.
3944      *
3945      * Inputs:
3946      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3947      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3948      *      compatible with input0.
3949      *
3950      * Outputs:
3951      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3952      *
3953      * Available since NNAPI feature level 3.
3954      */
3955     ANEURALNETWORKS_LOGICAL_OR = 63,
3956 
3957     /**
3958      * Computes the log softmax activations given logits.
3959      *
3960      * The output is calculated using this formula:
3961      *
3962      *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3963      *
3964      * Supported tensor {@link OperandCode}:
3965      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3966      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3967      *
3968      * Supported tensor rank: from 1.
3969      *
3970      * Inputs:
3971      * * 0: A tensor specifying the input logits.
3972      * * 1: A scalar, specifying the positive scaling factor for the exponent,
3973      *      beta.
3974      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
3975      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
3976      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
3977      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
3978      * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
3979      *      reduce across. Negative index is used to specify axis from the
3980      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3981      *
3982      * Outputs:
3983      * * 0: The output tensor of the same {@link OperandCode} and shape as
3984      *      input0.
3985      *
3986      * Available since NNAPI feature level 3.
3987      */
3988     ANEURALNETWORKS_LOG_SOFTMAX = 64,
3989 
3990     /**
3991      * Returns the element-wise maximum of two tensors.
3992      *
3993      * Supported tensor {@link OperandCode}:
3994      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3995      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3996      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3997      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3998      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3999      *
4000      * Supported tensor rank: from 1.
4001      *
4002      * Inputs:
4003      * * 0: A tensor.
4004      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4005      *      with input0.
4006      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4007      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
4008      *
4009      * Outputs:
4010      * * 0: A tensor of the same {@link OperandCode} as input0.
4011      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4012      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4013      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4014      *
4015      * Available since NNAPI feature level 3.
4016      */
4017     ANEURALNETWORKS_MAXIMUM = 65,
4018 
4019     /**
4020      * Returns the element-wise minimum of two tensors.
4021      *
4022      * Supported tensor {@link OperandCode}:
4023      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4024      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4025      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4026      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4027      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4028      *
4029      * Supported tensor rank: from 1.
4030      *
4031      * Inputs:
4032      * * 0: A tensor.
4033      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4034      *      with input0.
4035      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4036      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
4037      *
4038      * Outputs:
4039      * * 0: A tensor of the same {@link OperandCode} as input0.
4040      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4041      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4042      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4043      *
4044      * Available since NNAPI feature level 3.
4045      */
4046     ANEURALNETWORKS_MINIMUM = 66,
4047 
4048     /**
4049      * Computes numerical negative value element-wise.
4050      *
4051      * Supported tensor {@link OperandCode}:
4052      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4053      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4054      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4055      *
4056      * Supported tensor rank: from 1.
4057      *
4058      * Inputs:
4059      * * 0: A tensor.
4060      *
4061      * Outputs:
4062      * * 0: The output tensor of same shape as input0.
4063      *
4064      * Available since NNAPI feature level 3.
4065      */
4066     ANEURALNETWORKS_NEG = 67,
4067 
4068     /**
4069      * For input tensors x and y, computes x != y elementwise.
4070      *
4071      * Supported tensor {@link OperandCode}:
4072      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4073      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4074      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4075      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4076      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4077      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4078      *
4079      * Supported tensor rank: from 1
4080      *
4081      * This operation supports broadcasting.
4082      *
4083      * Inputs:
4084      * * 0: A tensor.
4085      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
4086      *      with input0.
4087      *
4088      * Outputs:
4089      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
4090      *
4091      * Available since NNAPI feature level 3.
4092      */
4093     ANEURALNETWORKS_NOT_EQUAL = 68,
4094 
4095     /**
4096      * Pads a tensor with the given constant value according to the specified
4097      * paddings.
4098      *
4099      * Supported tensor {@link OperandCode}:
4100      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4101      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4102      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4103      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4104      *
4105      * Supported tensor rank: up to 4
4106      *
4107      * Inputs:
4108      * * 0: An n-D tensor, specifying the tensor to be padded.
4109      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
4110      *      for each spatial dimension of the input tensor. The shape of the
4111      *      tensor must be {rank(input0), 2}.
4112      *      padding[i, 0] specifies the number of elements to be padded in the
4113      *      front of dimension i.
4114      *      padding[i, 1] specifies the number of elements to be padded after
4115      *      the end of dimension i.
4116      * * 2: A scalar specifying the value to use for padding input0.
4117      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
4118      *      pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
4119      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
4120      *      pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
4121      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4122      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4123      *      the pad value must be of {@link ANEURALNETWORKS_INT32}. The
4124      *      scale and zeroPoint are assumed to be the same as in input0.
4125      *
4126      * Outputs:
4127      * * 0: A tensor of the same {@link OperandCode} as input0. The
4128      *      output tensor has the same rank as input0, and each
4129      *      dimension of the output tensor has the same size as the
4130      *      corresponding dimension of the input tensor plus the size
4131      *      of the padding:
4132      *          output0.dimension[i] =
4133      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
4134      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4135      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4136      *      the scale and zeroPoint must be the same as input0.
4137      *
4138      * Available since NNAPI feature level 3.
4139      */
4140     ANEURALNETWORKS_PAD_V2 = 69,
4141 
4142     /**
4143      * Computes the power of one value to another.
4144      *
4145      * Given a tensor base and a tensor exponent, this operation computes
4146      * base^exponent elementwise.
4147      *
4148      * This operations supports broadcasting. The size of the output is the
4149      * maximum size along each dimension of the input operands. It starts with
4150      * the trailing dimensions, and works its way forward.
4151      *
4152      * For example:
4153      *     base.dimension     =    {4, 1, 2}
4154      *     exponent.dimension = {5, 4, 3, 1}
4155      *     output.dimension   = {5, 4, 3, 2}
4156      *
4157      * Supported tensor {@link OperandCode}:
4158      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4159      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4160      *
4161      * Supported tensor rank: from 1
4162      *
4163      * Inputs:
4164      * * 0: A tensor specifying the base.
4165      * * 1: A tensor specifying the exponent.
4166      *
4167      * Outputs:
4168      * * 0: An output tensor.
4169      *
4170      * Available since NNAPI feature level 3.
4171      */
4172     ANEURALNETWORKS_POW = 70,
4173 
4174     /**
4175      * Parametric Rectified Linear Unit.
4176      *
4177      * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4178      * is a learned array with the same {@link OperandCode} and compatible
4179      * dimensions as input x.
4180      *
4181      * Two dimensions are compatible when:
4182      *     1. they are equal, or
4183      *     2. one of them is 1
4184      *
4185      * The size of the output is the maximum size along each dimension of the
4186      * input operands. It starts with the trailing dimensions, and works its way
4187      * forward.
4188      *
4189      * Example:
4190      *     input.dimension  =    {4, 1, 2}
4191      *     alpha.dimension  = {5, 4, 3, 1}
4192      *     output.dimension = {5, 4, 3, 2}
4193      *
4194      * Supported tensor {@link OperandCode}:
4195      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4196      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4197      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4198      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4199      *
4200      * Supported tensor rank: from 1
4201      *
4202      * Inputs:
4203      * * 0: A tensor, specifying the input.
4204      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
4205      *      as input0, specifying the alpha.
4206      *
4207      * Outputs:
4208      * * 0: A tensor of the same {@link OperandCode} as input0.
4209      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4210      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4211      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
4212      *
4213      * Available since NNAPI feature level 3.
4214      */
4215     ANEURALNETWORKS_PRELU = 71,
4216 
4217     /**
4218      * Quantizes the input tensor.
4219      *
4220      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
4221      *
4222      *     output = max(0, min(255, round(input / scale) + zeroPoint))
4223      *
4224      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
4225      * tensor is:
4226      *
4227      *     output = max(-128, min(127, round(input / scale) + zeroPoint))
4228      *
4229      * Supported input tensor {@link OperandCode}:
4230      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4231      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4232      *
4233      * Supported output tensor {@link OperandCode}:
4234      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4235      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4236      *
4237      * Supported tensor rank: from 1
4238      *
4239      * Inputs:
4240      * * 0: A tensor, may be zero-sized.
4241      *
4242      * Outputs:
4243      * * 0: The output tensor of same shape as input0, but with
4244      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4245      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
4246      *
4247      * Available since NNAPI feature level 3.
4248      */
4249     ANEURALNETWORKS_QUANTIZE = 72,
4250 
4251     /**
4252      * A version of quantized LSTM, using 16 bit quantization for internal
4253      * state.
4254      *
4255      * There is no projection layer, so cell state size is equal to the output
4256      * size.
4257      *
4258      * Inputs:
4259      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4260      *      and shape [numBatches, inputSize] specifying the input to the LSTM
4261      *      cell. Tensor is quantized with a fixed quantization range of
4262      *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
4263      * * 1: The input-to-input weights.
4264      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4265      *      and shape [outputSize, inputSize] specifying input-to-input part of
4266      *      weights for fully-connected layer inside the LSTM cell.
4267      *      Quantization zero point and scale must be the same across all the
4268      *      weights.
4269      * * 2: The input-to-forget weights.
4270      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4271      *      and shape [outputSize, inputSize] specifying input-to-forget part of
4272      *      weights for fully-connected layer inside the LSTM cell.
4273      *      Quantization zero point and scale must be the same across all the
4274      *      weights.
4275      * * 3: The input-to-cell weights.
4276      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4277      *      and shape [outputSize, inputSize] specifying input-to-cell part of
4278      *      weights for fully-connected layer inside the LSTM cell.
4279      *      Quantization zero point and scale must be the same across all the
4280      *      weights.
4281      * * 4: The input-to-output weights.
4282      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4283      *      and shape [outputSize, inputSize] specifying input-to-output part of
4284      *      weights for fully-connected layer inside the LSTM cell.
4285      *      Quantization zero point and scale must be the same across all the
4286      *      weights.
4287      * * 5: The recurrent-to-input weights.
4288      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4289      *      and shape [outputSize, outputSize] specifying recurrent-to-input part
4290      *      of weights for fully-connected layer inside the LSTM cell.
4291      *      Quantization zero point and scale must be the same across all the
4292      *      weights.
4293      * * 6: The recurrent-to-forget weights.
4294      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4295      *      and shape [outputSize, outputSize] specifying recurrent-to-forget
4296      *      part of weights for fully-connected layer inside the LSTM cell.
4297      *      Quantization zero point and scale must be the same across all the
4298      *      weights.
4299      * * 7: The recurrent-to-cell weights.
4300      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4301      *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
4302      *      of weights for fully-connected layer inside the LSTM cell.
4303      *      Quantization zero point and scale must be the same across all the
4304      *      weights.
4305      * * 8: The recurrent-to-output weights.
4306      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4307      *      and shape [outputSize, outputSize] specifying recurrent-to-output
4308      *      part of weights for fully-connected layer inside the LSTM cell.
4309      *      Quantization zero point and scale must be the same across all the
4310      *      weights.
4311      * * 9: The input gate bias.
4312      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4313      *      [outputSize] specifying the bias for the fully-connected layer
4314      *      inside the LSTM cell. Bias is quantized with scale being a product
4315      *      of input and weights scales and zeroPoint equal to 0.
4316      * * 10: The forget gate bias.
4317      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4318      *      [outputSize] specifying the bias for the fully-connected layer
4319      *      inside the LSTM cell. Bias is quantized with scale being a product
4320      *      of input and weights scales and zeroPoint equal to 0.
4321      * * 11: The cell bias.
4322      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4323      *      [outputSize] specifying the bias for the fully-connected layer
4324      *      inside the LSTM cell. Bias is quantized with scale being a product
4325      *      of input and weights scales and zeroPoint equal to 0.
4326      * * 12: The output gate bias.
4327      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4328      *      [outputSize] specifying the bias for the fully-connected layer
4329      *      inside the LSTM cell. Bias is quantized with scale being a product
4330      *      of input and weights scales and zeroPoint equal to 0.
4331      * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4332      *       and shape [numBatches, outputSize] specifying the cell state from the
4333      *       previous time step of the LSTM cell. It is quantized using a
4334      *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
4335      *       32768, zeroPoint = 0).
4336      * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4337      *       and shape [numBatches, outputSize] specifying the output of the LSTM
4338      *       cell from previous time-step. Tensor is quantized with a fixed
4339      *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
4340      *       128).
4341      *
4342      *
4343      * Outputs:
4344      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4345      *      and shape [numBatches, outputSize] which contains a cell state from
4346      *      the current time step. Tensor is quantized using a quantization
4347      *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
4348      *      0).
4349      * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4350      *      and shape [numBatches, outputSize] which contains the output value.
4351      *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
4352      *      (scale = 1/128, zeroPoint = 128).
4353      */
4354     ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
4355 
4356     /**
4357      * Draws samples from a multinomial distribution.
4358      *
4359      * Supported tensor {@link OperandCode}:
4360      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4361      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4362      *
4363      * Inputs:
4364      * * 0: A 2-D tensor with shape [batches, classes], specifying the
4365      *      unnormalized log-probabilities for all classes.
4366      * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of
4367      *      independent samples to draw for each row slice.
4368      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],
4369      *      specifying seeds used to initialize the random distribution. If both
4370      *      provided seeds are 0, both will be randomly generated.
4371      * Outputs:
4372      * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
4373      *      [batches, samples], containing the drawn samples.
4374      *
4375      * Available since NNAPI feature level 3.
4376      */
4377     ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
4378 
4379     /**
4380      * Reduces a tensor by computing the "logical and" of elements along given
4381      * dimensions.
4382      *
4383      * If keep_dims is true, the reduced dimensions are
4384      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4385      * 1 for each entry in dimensions.
4386      *
4387      * Supported tensor {@link OperandCode}:
4388      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4389      *
4390      * Supported tensor rank: up to 4
4391      *
4392      * Inputs:
4393      * * 0: An n-D tensor.
4394      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4395      *      to reduce. Dimension values must be in the range [-n, n).
4396      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4397      *      retains reduced dimensions with length 1.
4398      *
4399      * Outputs:
4400      * * 0: A tensor of the same {@link OperandCode} as input0.
4401      *      If all dimensions are reduced and keep_dims is false, the output
4402      *      shape is [1].
4403      *
4404      * Available since NNAPI feature level 3.
4405      */
4406     ANEURALNETWORKS_REDUCE_ALL = 75,
4407 
4408     /**
4409      * Reduces a tensor by computing the "logical or" of elements along given
4410      * dimensions.
4411      *
4412      * If keep_dims is true, the reduced dimensions are
4413      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4414      * 1 for each entry in dimensions.
4415      *
4416      * Supported tensor {@link OperandCode}:
4417      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4418      *
4419      * Supported tensor rank: up to 4
4420      *
4421      * Inputs:
4422      * * 0: An n-D tensor.
4423      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4424      *      to reduce. Dimension values must be in the range [-n, n).
4425      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4426      *      retains reduced dimensions with length 1.
4427      *
4428      * Outputs:
4429      * * 0: A tensor of the same {@link OperandCode} as input0.
4430      *      If all dimensions are reduced and keep_dims is false, the output
4431      *      shape is [1].
4432      *
4433      * Available since NNAPI feature level 3.
4434      */
4435     ANEURALNETWORKS_REDUCE_ANY = 76,
4436 
4437     /**
4438      * Reduces a tensor by computing the maximum of elements along given
4439      * dimensions.
4440      *
4441      * If keep_dims is true, the reduced dimensions are
4442      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4443      * 1 for each entry in dimensions.
4444      *
4445      * Supported tensor {@link OperandCode}:
4446      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4447      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4448      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4449      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4450      *
4451      * Supported tensor rank: up to 4
4452      *
4453      * Inputs:
4454      * * 0: An n-D tensor.
4455      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4456      *      to reduce. Dimension values must be in the range [-n, n).
4457      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4458      *      retains reduced dimensions with length 1.
4459      *
4460      * Outputs:
4461      * * 0: A tensor of the same {@link OperandCode} as input0.
4462      *      If all dimensions are reduced and keep_dims is false, the output
4463      *      shape is [1].
4464      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4465      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4466      *      the scale and zeroPoint must be the same as input0.
4467      *
4468      * Available since NNAPI feature level 3.
4469      */
4470     ANEURALNETWORKS_REDUCE_MAX = 77,
4471 
4472     /**
4473      * Reduces a tensor by computing the minimum of elements along given
4474      * dimensions.
4475      *
4476      * If keep_dims is true, the reduced dimensions are
4477      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4478      * 1 for each entry in dimensions.
4479      *
4480      * Supported tensor {@link OperandCode}:
4481      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4482      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4483      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4484      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4485      *
4486      * Supported tensor rank: up to 4
4487      *
4488      * Inputs:
4489      * * 0: An n-D tensor.
4490      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4491      *      to reduce. Dimension values must be in the range [-n, n).
4492      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4493      *      retains reduced dimensions with length 1.
4494      *
4495      * Outputs:
4496      * * 0: A tensor of the same {@link OperandCode} as input0.
4497      *      If all dimensions are reduced and keep_dims is false, the output
4498      *      shape is [1].
4499      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4500      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4501      *      the scale and zeroPoint must be the same as input0.
4502      *
4503      * Available since NNAPI feature level 3.
4504      */
4505     ANEURALNETWORKS_REDUCE_MIN = 78,
4506 
4507     /**
4508      * Reduces a tensor by multiplying elements along given dimensions.
4509      *
4510      * If keep_dims is true, the reduced dimensions are
4511      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4512      * 1 for each entry in dimensions.
4513      *
4514      * Supported tensor {@link OperandCode}:
4515      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4516      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4517      *
4518      * Supported tensor rank: up to 4
4519      *
4520      * Inputs:
4521      * * 0: An n-D tensor.
4522      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4523      *      to reduce. Dimension values must be in the range [-n, n).
4524      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4525      *      retains reduced dimensions with length 1.
4526      *
4527      * Outputs:
4528      * * 0: A tensor of the same {@link OperandCode} as input0.
4529      *      If all dimensions are reduced and keep_dims is false, the output
4530      *      shape is [1].
4531      *
4532      * Available since NNAPI feature level 3.
4533      */
4534     ANEURALNETWORKS_REDUCE_PROD = 79,
4535 
4536     /**
4537      * Reduces a tensor by summing elements along given dimensions.
4538      *
4539      * If keep_dims is true, the reduced dimensions are
4540      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4541      * 1 for each entry in dimensions.
4542      *
4543      * Supported tensor {@link OperandCode}:
4544      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4545      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4546      *
4547      * Supported tensor rank: up to 4
4548      *
4549      * Inputs:
4550      * * 0: An n-D tensor.
4551      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4552      *      to reduce. Dimension values must be in the range [-n, n).
4553      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4554      *      retains reduced dimensions with length 1.
4555      *
4556      * Outputs:
4557      * * 0: A tensor of the same {@link OperandCode} as input0.
4558      *      If all dimensions are reduced and keep_dims is false, the output
4559      *      shape is [1].
4560      *
4561      * Available since NNAPI feature level 3.
4562      */
4563     ANEURALNETWORKS_REDUCE_SUM = 80,
4564 
4565     /**
4566      * Select and scale the feature map of each region of interest to a unified
4567      * output size by average pooling sampling points from bilinear interpolation.
4568      *
4569      * The region of interest is represented by its upper-left corner coordinate
4570      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4571      * A spatial scaling factor is applied to map into feature map coordinate.
4572      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4573      *
4574      * No rounding is applied in this operation. The sampling points are uniformly
4575      * distributed in the pooling bin and their values are calculated by bilinear
4576      * interpolation.
4577      *
4578      * Supported tensor {@link OperandCode}:
4579      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4580      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4581      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4582      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4583      *
4584      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4585      * With the default data layout NHWC, the data is stored in the order of:
4586      * [batch, height, width, channels]. Alternatively, the data layout could
4587      * be NCHW, the data storage order of: [batch, channels, height, width].
4588      *
4589      * Inputs:
4590      * * 0: A 4-D tensor, specifying the feature map.
4591      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4592      *      the regions of interest, each line with format [x1, y1, x2, y2].
4593      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
4594      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4595      *      with zeroPoint of 0 and scale of 0.125. Zero num_rois is
4596      *      supported for this tensor.
4597      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4598      *      [num_rois], specifying the batch index of each box. Boxes with
4599      *      the same batch index are grouped together. Zero num_rois is
4600      *      supported for this tensor.
4601      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4602      *      height of the output tensor.
4603      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4604      *      width of the output tensor.
4605      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4606      *      from the height of original image to the height of feature map.
4607      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4608      *      from the width of original image to the width of feature map.
4609      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4610      *      sampling points in height dimension used to compute the output.
4611      *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4612      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4613      *      sampling points in width dimension used to compute the output.
4614      *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4615      * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4616      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4617      *
4618      * Outputs:
4619      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4620      *      shape is [num_rois, out_height, out_width, depth].
4621      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4622      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4623      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4624      *
4625      * Available since NNAPI feature level 3.
4626      */
4627     ANEURALNETWORKS_ROI_ALIGN = 81,
4628 
4629     /**
4630      * Select and scale the feature map of each region of interest to a unified
4631      * output size by max-pooling.
4632      *
4633      * The region of interest is represented by its upper-left corner coordinate
4634      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4635      * A spatial scaling factor is applied to map into feature map coordinate.
4636      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4637      *
4638      * Rounding is applied in this operation to ensure integer boundary for
4639      * regions of interest and pooling bins.
4640      *
4641      * Supported tensor {@link OperandCode}:
4642      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4643      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4644      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4645      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4646      *
4647      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4648      * With the default data layout NHWC, the data is stored in the order of:
4649      * [batch, height, width, channels]. Alternatively, the data layout could
4650      * be NCHW, the data storage order of: [batch, channels, height, width].
4651      *
4652      * Inputs:
4653      * * 0: A 4-D tensor, specifying the feature map.
4654      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4655      *      the regions of interest, each line with format [x1, y1, x2, y2].
4656      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4657      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4658      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4659      *      with zeroPoint of 0 and scale of 0.125.
4660      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4661      *      [num_rois], specifying the batch index of each box. Boxes with
4662      *      the same batch index are grouped together.
4663      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4664      *      height of the output tensor.
4665      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4666      *      width of the output tensor.
4667      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4668      *      from the height of original image to the height of feature map.
4669      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4670      *      from the width of original image to the width of feature map.
4671      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4672      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4673      *
4674      * Outputs:
4675      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4676      *      shape is [num_rois, out_height, out_width, depth].
4677      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4678      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4679      *      the scale and zeroPoint must be the same as input0.
4680      *
4681      * Available since NNAPI feature level 3.
4682      */
4683     ANEURALNETWORKS_ROI_POOLING = 82,
4684 
4685     /**
4686      * Computes reciprocal of square root of x element-wise.
4687      *
4688      * Supported tensor {@link OperandCode}:
4689      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4690      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4691      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 7)
4692      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 7)
4693      *
4694      * Supported tensor rank: from 1.
4695      *
4696      * Inputs:
4697      * * 0: A tensor.
4698      *
4699      * Outputs:
4700      * * 0: The output tensor of same shape as input0.
4701      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4702      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4703      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4704      *
4705      * Available since NNAPI feature level 3.
4706      */
4707     ANEURALNETWORKS_RSQRT = 83,
4708 
4709     /**
4710      * Using a tensor of booleans c and input tensors x and y select values
4711      * elementwise from both input tensors:
4712      *
4713      * O[i] = C[i] ? x[i] : y[i].
4714      *
4715      * Supported tensor {@link OperandCode}:
4716      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4717      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4718      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4719      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4720      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4721      *
4722      * Supported tensor rank: from 1
4723      *
4724      * Inputs:
4725      * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
4726      *      mask that chooses, based on the value at each element, whether the
4727      *      corresponding element in the output should be taken from input1 (if
4728      *      true) or input2 (if false).
4729      * * 1: An input tensor of the same shape as input0.
4730      * * 2: An input tensor of the same shape and type as input1.
4731      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4732      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4733      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4734      *
4735      * Outputs:
4736      * * 0: A tensor of the same type and shape as input1 and input2.
4737      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4738      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4739      *
4740      * Available since NNAPI feature level 3.
4741      */
4742     ANEURALNETWORKS_SELECT = 84,
4743 
4744     /**
4745      * Computes sin of x element-wise.
4746      *
4747      * Supported tensor {@link OperandCode}:
4748      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4749      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4750      *
4751      * Supported tensor rank: from 1.
4752      *
4753      * Inputs:
4754      * * 0: A tensor.
4755      *
4756      * Outputs:
4757      * * 0: The output tensor of same shape as input0.
4758      *
4759      * Available since NNAPI feature level 3.
4760      */
4761     ANEURALNETWORKS_SIN = 85,
4762 
4763     /**
4764      * Extracts a slice of specified size from the input tensor starting at a
4765      * specified location.
4766      *
4767      * The starting location is specified as a 1-D tensor containing offsets
4768      * for each dimension. The size is specified as a 1-D tensor containing
4769      * either size of a slice along corresponding dimension or -1. In the latter
4770      * case, all the remaining elements in dimension are included in the slice.
4771      *
4772      * A sum of begin offset and a size of a slice must not exceed size of a
4773      * corresponding dimension.
4774      *
4775      * Supported tensor {@link OperandCode}:
4776      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4777      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4778      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4779      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4780      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4781      *
4782      * Supported tensor rank: from 1
4783      *
4784      * Inputs:
4785      * * 0: An n-D tensor to take slice from, may be zero-sized.
4786      * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4787      *      the beginning indices of the slice in each dimension.
4788      * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4789      *      the size of the slice in each dimension.
4790      *
4791      * Outputs:
4792      * * 0: An n-D tensor of the same type as the input containing the slice.
4793      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4794      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4795      *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4796      *
4797      * Available since NNAPI feature level 3.
4798      */
4799     ANEURALNETWORKS_SLICE = 86,
4800 
4801     /**
4802      * Splits a tensor along a given axis into num_splits subtensors.
4803      *
4804      * Supported tensor {@link OperandCode}:
4805      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4806      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4807      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4808      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4809      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4810      *
4811      * Supported tensor rank: from 1
4812      *
4813      * Inputs:
4814      * * 0: An n-D tensor to split.
4815      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
4816      *      which to split.
4817      * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
4818      *      splits along given axis. Must evenly divide axis size.
4819      *
4820      * Outputs:
4821      * * 0 ~ (num_splits - 1): Resulting subtensors.
4822      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4823      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4824      *      the scale and zeroPoint must be the same as input0.
4825      *
4826      * Available since NNAPI feature level 3.
4827      */
4828     ANEURALNETWORKS_SPLIT = 87,
4829 
4830     /**
4831      * Computes square root of x element-wise.
4832      *
4833      * Supported tensor {@link OperandCode}:
4834      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4835      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4836      *
4837      * Supported tensor rank: from 1.
4838      *
4839      * Inputs:
4840      * * 0: A tensor.
4841      *
4842      * Outputs:
4843      * * 0: The output tensor of same shape as input0.
4844      *
4845      * Available since NNAPI feature level 3.
4846      */
4847     ANEURALNETWORKS_SQRT = 88,
4848 
4849     /**
4850      * Constructs a tensor by tiling a given tensor.
4851      *
4852      * This operation creates a new tensor by replicating `input` `multiples`
4853      * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4854      * elements, and the values of `input` are replicated `multiples[i]` times
4855      * along the i-th dimension.
4856      * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4857      *
4858      * Supported tensor {@link OperandCode}:
4859      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4860      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4861      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4862      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4863      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4864      *
4865      * Supported tensor rank: from 1
4866      *
4867      * Inputs:
4868      * * 0: input, an n-D tensor specifying the input.
4869      * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
4870      *      The length of multiples must be n.
4871      *
4872      * Outputs:
4873      * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
4874      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4875      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4876      *      the scale and zeroPoint must be the same as input0.
4877      *
4878      * Available since NNAPI feature level 3.
4879      */
4880     ANEURALNETWORKS_TILE = 89,
4881 
4882     /**
4883      * Finds values and indices of the k largest entries for the last dimension.
4884      *
4885      * Resulting values in each dimensions are sorted in descending order. If
4886      * two values are equal, the one with larger index appears first.
4887      *
4888      * Supported tensor {@link OperandCode}:
4889      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4890      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4891      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4892      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4893      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4894      *
4895      * Supported tensor rank: from 1
4896      *
4897      * Inputs:
4898      * * 0: input, an n-D tensor specifying the input.
4899      * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4900      *      top elements to look for along the last dimension.
4901      *
4902      * Outputs:
4903      * * 0: An n-D tensor of the same type as the input, containing the k
4904      *      largest elements along each last dimensional slice.
4905      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4906      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4907      *      the scale and zeroPoint must be the same as input0.
4908      * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
4909      *      containing the indices of values within the last dimension of input.
4910      *
4911      * Available since NNAPI feature level 3.
4912      */
4913     ANEURALNETWORKS_TOPK_V2 = 90,
4914 
4915     /**
4916      * Performs the transpose of 2-D convolution operation.
4917      *
4918      * This operation is sometimes called "deconvolution" after Deconvolutional
4919      * Networks, but is actually the transpose (gradient) of
4920      * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
4921      *
4922      * The output dimensions are functions of the filter dimensions, stride, and
4923      * padding.
4924      *
4925      * Supported tensor {@link OperandCode} configurations:
4926      * * 16 bit floating point:
4927      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
4928      *
4929      * * 32 bit floating point:
4930      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
4931      *
4932      * * Quantized:
4933      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
4934      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4935      * * * input.scale * filter.scale).
4936      *
4937      * * Quantized with symmetric per channel quantization for the filter:
4938      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
4939      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4940      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4941      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4942      *
4943      * Available since NNAPI feature level 4:
4944      * * Quantized signed (since NNAPI feature level 4):
4945      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4946      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4947      * * * input.scale * filter.scale).
4948      *
4949      * * Quantized signed with filter symmetric per channel quantization
4950      *   (since NNAPI feature level 4):
4951      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4952      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4953      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4954      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4955      *
4956      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4957      * With the default data layout NHWC, the data is stored in the order of:
4958      * [batch, height, width, channels]. Alternatively, the data layout could
4959      * be NCHW, the data storage order of: [batch, channels, height, width].
4960      *
4961      * Both explicit padding and implicit padding are supported.
4962      *
4963      * Inputs (explicit padding):
4964      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4965      *      specifying the input.
4966      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
4967      * * 1: A 4-D tensor, of shape
4968      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4969      *      filter. For tensor of type
4970      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4971      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
4972      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4973      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
4974      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the
4975      *      same type.
4976      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4977      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4978      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
4979      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4980      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
4981      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
4982      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4983      *      bias_scale[i] = input_scale * filter_scale[i].
4984      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4985      *      the left, in the ‘width’ dimension.
4986      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4987      *      the right, in the ‘width’ dimension.
4988      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4989      *      the top, in the ‘height’ dimension.
4990      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4991      *      the bottom, in the ‘height’ dimension.
4992      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4993      *      walking through input in the ‘width’ dimension.
4994      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4995      *      walking through input in the ‘height’ dimension.
4996      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
4997      *      {@link FuseCode} values. Specifies the activation to
4998      *      invoke on the result.
4999      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
5000      *       NCHW data layout for input0 and output0. Set to false for NHWC.
5001      *
5002      * Inputs (implicit padding):
5003      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
5004      *      specifying the input.
5005      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
5006      * * 1: A 4-D tensor, of shape
5007      *      [depth_out, filter_height, filter_width, depth_in], specifying the
5008      *      filter. For tensor of type
5009      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
5010      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
5011      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
5012      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
5013      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the
5014      *      same type.
5015      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5016      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
5017      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
5018      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5019      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
5020      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
5021      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5022      *      bias_scale[i] = input_scale * filter_scale[i].
5023      * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output
5024      *      tensor shape.
5025      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
5026      *      padding scheme, has to be one of the
5027      *      {@link PaddingCode} values.
5028      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5029      *      walking through input in the ‘width’ dimension.
5030      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5031      *      walking through input in the ‘height’ dimension.
5032      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
5033      *      {@link FuseCode} values. Specifies the activation to
5034      *      invoke on the result.
5035      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
5036      *      NCHW data layout for input0 and output0. Set to false for NHWC.
5037      *
5038      * Outputs:
5039      * * 0: The output 4-D tensor, of shape
5040      *      [batches, out_height, out_width, depth_out].
5041      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5042      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5043      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5044      *
5045      * Available since NNAPI feature level 3.
5046      */
5047     ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
5048 
5049     /**
5050      * A recurrent neural network specified by an LSTM cell.
5051      *
5052      * Performs (fully) dynamic unrolling of input.
5053      *
5054      * This Op unrolls the input along the time dimension, and implements the
5055      * following operation for each element in the sequence
5056      * s = 1...sequence_length:
5057      *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
5058      *
5059      * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},
5060      * the "projection" is an optional projection layer from state and output
5061      * and the “activation” is the function passed as the
5062      * “fused_activation_function” argument (if not “NONE”).
5063      *
5064      * Supported tensor {@link OperandCode}:
5065      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5066      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5067      *
5068      * Supported tensor rank: 3, either time-major or batch-major.
5069      *
5070      * All input and output tensors must be of the same type.
5071      *
5072      * Inputs:
5073      * * 0: The input (\f$x_t\f$).
5074      *      A 3-D tensor of shape:
5075      *        If time-major: [max_time, batch_size, input_size]
5076      *        If batch-major: [batch_size, max_time, input_size]
5077      *      where “max_time” is the number of timesteps (sequence length),
5078      *      “batch_size” corresponds to the batching dimension, and
5079      *      “input_size” is the size of the input.
5080      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
5081      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
5082      *      corresponds to the number of cell units.
5083      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
5084      *      A 2-D tensor of shape [num_units, input_size].
5085      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
5086      *      A 2-D tensor of shape [num_units, input_size].
5087      * * 4: The input-to-output weights (\f$W_{xo}\f$).
5088      *      A 2-D tensor of shape [num_units, input_size].
5089      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
5090      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
5091      *      corresponds to either the number of cell units (i.e., “num_units”),
5092      *      or the second dimension of the “projection_weights”, if defined.
5093      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
5094      *      A 2-D tensor of shape [num_units, output_size].
5095      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
5096      *      A 2-D tensor of shape [num_units, output_size].
5097      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
5098      *      A 2-D tensor of shape [num_units, output_size].
5099      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
5100      *      A 1-D tensor of shape [num_units].
5101      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
5102      *      A 1-D tensor of shape [num_units].
5103      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
5104      *      A 1-D tensor of shape [num_units].
5105      * * 12:The input gate bias (\f$b_i\f$). Optional.
5106      *      A 1-D tensor of shape [num_units].
5107      * * 13:The forget gate bias (\f$b_f\f$).
5108      *      A 1-D tensor of shape [num_units].
5109      * * 14:The cell bias (\f$b_c\f$).
5110      *      A 1-D tensor of shape [num_units].
5111      * * 15:The output gate bias (\f$b_o\f$).
5112      *      A 1-D tensor of shape [num_units].
5113      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
5114      *      A 2-D tensor of shape [output_size, num_units].
5115      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
5116      *      A 1-D tensor of shape [output_size].
5117      * * 18:The output state (in) (\f$h_{t-1}\f$).
5118      *      A 2-D tensor of shape [batch_size, output_size].
5119      * * 19:The cell state (in) (\f$C_{t-1}\f$).
5120      *      A 2-D tensor of shape [batch_size, num_units].
5121      * * 20:The activation function (\f$g\f$).
5122      *      A value indicating the activation function:
5123      *      <ul>
5124      *      <li>0: None;
5125      *      <li>1: Relu;
5126      *      <li>3: Relu6;
5127      *      <li>4: Tanh;
5128      *      <li>6: Sigmoid.
5129      *      </ul>
5130      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
5131      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
5132      *      then clipping is disabled.
5133      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
5134      *      projection layer, such that values are bound within
5135      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
5136      * * 23:Time-major if true, batch-major if false.
5137      * * 24:The input layer normalization weights. Optional.
5138      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5139      *      to activation at input gate.
5140      * * 25:The forget layer normalization weights. Optional.
5141      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5142      *      to activation at forget gate.
5143      * * 26:The cell layer normalization weights. Optional.
5144      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5145      *      to activation at cell gate.
5146      * * 27:The output layer normalization weights. Optional.
5147      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5148      *      to activation at output gate.
5149      *
5150      * Outputs:
5151      * * 0: The output (\f$o_t\f$).
5152      *      A 3-D tensor of shape:
5153      *        If time-major: [max_time, batch_size, output_size]
5154      *        If batch-major: [batch_size, max_time, output_size]
5155      * * 1: A tensor of shape [batch_size, output_size] containing a hidden
5156      *      state from the last time step in the sequence. This output is
5157      *      optional and can be omitted. If this output is present then
5158      *      output #2 must be present as well.
5159      *      Available since NNAPI feature level 4.
5160      * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
5161      *      from the last time step in the sequence. This output is optional
5162      *      and can be omitted.
5163      *      Available since NNAPI feature level 4.
5164      *
5165      * Available since NNAPI feature level 3.
5166      *
5167      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5168      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5169      * in which multiple cells are chained and state tensors are propagated.
5170      */
5171     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
5172 
5173     /**
5174      * A recurrent neural network layer that applies a basic RNN cell to a
5175      * sequence of inputs.
5176      *
5177      * This layer unrolls the input along the sequence dimension, and implements
5178      * the following operation
5179      * for each element in the sequence s = 1...sequence_length:
5180      *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
5181      *   recurrent_weights’ + bias)
5182      *
5183      * Where:
5184      * * “input_weights” is a weight matrix that multiplies the inputs;
5185      * * “recurrent_weights” is a weight matrix that multiplies the current
5186      *    “state” which itself is the output from the previous time step
5187      *    computation;
5188      * * “bias” is a bias vector (added to each output vector in the batch);
5189      * * “activation” is the function passed as the “fused_activation_function”
5190      *   argument (if not “NONE”).
5191      *
5192      * Supported tensor {@link OperandCode}:
5193      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5194      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5195      *
5196      * The input tensors must all be the same type.
5197      *
5198      * Inputs:
5199      * * 0: input.
5200      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5201      *      it is set to 1, then the input has a shape [maxTime, batchSize,
5202      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
5203      *      inputSize].
5204      * * 1: weights.
5205      *      A 2-D tensor of shape [numUnits, inputSize].
5206      * * 2: recurrent_weights.
5207      *      A 2-D tensor of shape [numUnits, numUnits].
5208      * * 3: bias.
5209      *      A 1-D tensor of shape [numUnits].
5210      * * 4: hidden state
5211      *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
5212      *      state input for the first time step of the computation.
5213      * * 5: fusedActivationFunction.
5214      *      A {@link FuseCode} value indicating the activation function. If
5215      *      “NONE” is specified then it results in a linear activation.
5216      * * 6: timeMajor
5217      *      An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
5218      *      of input and output tensors. Must be set to either 0 or 1.
5219      * Outputs:
5220      * * 0: output.
5221      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5222      *      it is set to 1, then the output has a shape [maxTime, batchSize,
5223      *      numUnits], otherwise the output has a shape [batchSize, maxTime,
5224      *      numUnits].
5225      * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
5226      *      from the last time step in the sequence. This output is optional
5227      *      and can be omitted.
5228      *      Available since NNAPI feature level 4.
5229      *
5230      * Available since NNAPI feature level 3.
5231      *
5232      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5233      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5234      * in which multiple cells are chained and state tensors are propagated.
5235      */
5236     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
5237 
5238     /**
5239      * Resizes images to given size using the nearest neighbor interpolation.
5240      *
5241      * Resized images will be distorted if their output aspect ratio is not the
5242      * same as input aspect ratio. The corner pixels of output may not be the
5243      * same as corner pixels of input.
5244      *
5245      * Supported tensor {@link OperandCode}:
5246      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5247      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5248      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5249      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
5250      *
5251      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5252      * With the default data layout NHWC, the data is stored in the order of:
5253      * [batch, height, width, channels]. Alternatively, the data layout could
5254      * be NCHW, the data storage order of: [batch, channels, height, width].
5255      *
5256      * Both resizing by shape and resizing by scale are supported.
5257      *
5258      * Inputs (resizing by shape):
5259      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5260      *      the input. Zero batches is supported for this tensor.
5261      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5262      *      width of the output tensor.
5263      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5264      *      height of the output tensor.
5265      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5266      *      Set to true to specify NCHW data layout for input0 and output0.
5267      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5268      *      scalar, default to false.  If True, the centers of the 4 corner
5269      *      pixels of the input and output tensors are aligned, preserving the
5270      *      values at the corner pixels.
5271      *      Available since NNAPI feature level 4.
5272      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5273      *      scalar, default to false. If True, the pixel centers are assumed to
5274      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5275      *      TF 2.0. If this parameter is True, then align_corners parameter
5276      *      must be False.
5277      *      Available since NNAPI feature level 4.
5278      *
5279      * Inputs (resizing by scale):
5280      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5281      *      the input. Zero batches is supported for this tensor.
5282      * * 1: A scalar, specifying width_scale, the scaling factor of the width
5283      *      dimension from the input tensor to the output tensor. The output
5284      *      width is calculated as new_width = floor(width * width_scale).
5285      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5286      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5287      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5288      * * 2: A scalar, specifying height_scale, the scaling factor of the height
5289      *      dimension from the input tensor to the output tensor. The output
5290      *      height is calculated as new_height = floor(height * height_scale).
5291      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5292      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5293      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5294      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5295      *      Set to true to specify NCHW data layout for input0 and output0.
5296      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5297      *      scalar, default to false.  If True, the centers of the 4 corner
5298      *      pixels of the input and output tensors are aligned, preserving the
5299      *      values at the corner pixels.
5300      *      Available since NNAPI feature level 4.
5301      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5302      *      scalar, default to false. If True, the pixel centers are assumed to
5303      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5304      *      TF 2.0. If this parameter is True, then align_corners parameter
5305      *      must be False.
5306      *      Available since NNAPI feature level 4.
5307      *
5308      * Outputs:
5309      * * 0: The output 4-D tensor, of shape
5310      *      [batches, new_height, new_width, depth].
5311      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5312      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5313      *      the scale and zeroPoint must be the same as input0.
5314      *
5315      * Available since NNAPI feature level 3.
5316      */
5317     ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
5318 
5319     // Operations below are available since NNAPI feature level 4.
5320 
5321     /**
5322      * Quantized version of {@link ANEURALNETWORKS_LSTM}.
5323      *
5324      * The input and the output use asymmetric quantized types, while the rest
5325      * use symmetric ones.
5326      *
5327      * Inputs:
5328      * * 0: The input to the LSTM cell.
5329      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5330      *      Shape: [batchSize, inputSize]
5331      * * 1: The input-to-input weights. Optional.
5332      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5333      *      Shape: [numUnits, inputSize]
5334      * * 2: The input-to-forget weights.
5335      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5336      *      Shape: [numUnits, inputSize]
5337      * * 3: The input-to-cell weights.
5338      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5339      *      Shape: [numUnits, inputSize]
5340      * * 4: The input-to-output weights.
5341      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5342      *      Shape: [numUnits, inputSize]
5343      * * 5: The recurrent-to-input weights. Optional.
5344      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5345      *      Shape: [numUnits, outputSize]
5346      * * 6: The recurrent-to-forget weights.
5347      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5348      *      Shape: [numUnits, outputSize]
5349      * * 7: The recurrent-to-cell weights.
5350      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5351      *      Shape: [numUnits, outputSize]
5352      * * 8: The recurrent-to-output weights.
5353      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5354      *      Shape: [numUnits, outputSize]
5355      * * 9: The cell-to-input weights (for peephole). Optional.
5356      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5357      *      Shape: [numUnits]
5358      * * 10: The cell-to-forget weights (for peephole). Optional.
5359      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5360      *       Shape: [numUnits]
5361      * * 11: The cell-to-output weights (for peephole). Optional.
5362      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5363      *       Shape: [numUnits]
5364      * * 12: The input gate bias. Quantized with scale being the
5365      *       product of input and weights scales and zeroPoint equal to 0.
5366      *       Optional.
5367      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5368      *       Shape: [numUnits]
5369      * * 13: The forget gate bias. Quantized with scale being the
5370      *       product of input and weights scales and zeroPoint equal to 0.
5371      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5372      *       Shape: [numUnits]
5373      * * 14: The cell bias. Quantized with scale being the
5374      *       product of input and weights scales and zeroPoint equal to 0.
5375      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5376      *       Shape: [numUnits]
5377      * * 15: The output gate bias. Quantized with scale being the
5378      *       product of input and weights scales and zeroPoint equal to 0.
5379      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5380      *       Shape: [numUnits]
5381      * * 16: The projection weights. Optional.
5382      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5383      *       Shape: [outputSize, numUnits]
5384      * * 17: The projection bias. Quantized with scale being the
5385      *       product of input and weights scales and zeroPoint equal to 0.
5386      *       Optional.
5387      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5388      *       Shape: [outputSize]
5389      * * 18: The output from the previous time step.
5390      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5391      *       Shape: [batchSize, outputSize]
5392      * * 19: The cell state from the previous time step.
5393      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5394      *       Shape: [batchSize, numUnits]
5395      * * 20: The input layer normalization weights. Used to rescale
5396      *       normalized inputs to activation at input gate. Optional.
5397      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5398      *       Shape: [numUnits]
5399      * * 21: The forget layer normalization weights. Used to
5400      *       rescale normalized inputs to activation at forget gate. Optional.
5401      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5402      *       Shape: [numUnits]
5403      * * 22: The cell layer normalization weights. Used to rescale
5404      *       normalized inputs to activation at cell gate. Optional.
5405      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5406      *       Shape: [numUnits]
5407      * * 23: The output layer normalization weights. Used to
5408      *       rescale normalized inputs to activation at output gate. Optional.
5409      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5410      *       Shape: [numUnits]
5411      * * 24: The cell clip. If provided the cell state is clipped
5412      *       by this value prior to the cell output activation. Optional.
5413      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5414      * * 25: The projection clip. If provided and projection is enabled,
5415      *       this is used for clipping the projected values. Optional.
5416      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5417      * * 26: The scale of the intermediate result of matmul,
5418      *       i.e. input to layer normalization, at input gate.
5419      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5420      * * 27: The scale of the intermediate result of matmul,
5421      *       i.e. input to layer normalization, at forget gate.
5422      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5423      * * 28: The scale of the intermediate result of matmul,
5424      *       i.e. input to layer normalization, at cell gate.
5425      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5426      * * 29: The scale of the intermediate result of matmul,
5427      *       i.e. input to layer normalization, at output gate.
5428      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5429      * * 30: The zero point of the hidden state, i.e. input to
5430      *       projection.
5431      *       Type: {@link ANEURALNETWORKS_INT32}.
5432      * * 31: The scale of the hidden state, i.e. input to
5433      *       projection.
5434      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5435      *
5436      * Outputs:
5437      * * 0: The output state (out).
5438      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5439      *      Shape: [batchSize, outputSize]
5440      * * 1: The cell state (out).
5441      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5442      *      Shape: [batchSize, numUnits]
5443      * * 2: The output. This is effectively the same as the current
5444      *      "output state (out)" value.
5445      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5446      *      Shape: [batchSize, outputSize]
5447      *
5448      * Available since NNAPI feature level 4.
5449      */
5450     ANEURALNETWORKS_QUANTIZED_LSTM = 95,
5451 
5452     /**
5453      * Executes one of the two referenced models as determined by a boolean
5454      * value.
5455      *
5456      * The inputs and outputs of the two referenced models must agree with the
5457      * signature of this operation. That is, if the operation has (3 + n) inputs
5458      * and m outputs, both models must have n inputs and m outputs with the same
5459      * types, ranks (if specified), dimensions (if specified), scales,
5460      * zeroPoints, and other operand parameters as the corresponding operation
5461      * inputs and outputs.
5462      *
5463      * Inputs:
5464      * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]
5465      *      that determines which of the two referenced models to execute.
5466      *      The operand must have fully specified dimensions.
5467      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5468      *      executed if the condition is true.
5469      * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5470      *      executed if the condition is false.
5471      * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution.
5472      *
5473      * Outputs:
5474      * * 0 ~ (m - 1): Outputs produced by the selected model.
5475      *
5476      * Available since NNAPI feature level 4.
5477      */
5478     ANEURALNETWORKS_IF = 96,
5479 
5480     /**
5481      * Executes the body model until the condition model outputs false.
5482      *
5483      * The inputs to this operation are the condition model, the body model,
5484      * and operand values for the first iteration of the loop. The values are
5485      * implicitly split into three groups of input-output, state-only, and
5486      * input-only values, as described below.
5487      *
5488      * The outputs of this operation are the final values of input-output
5489      * operands.
5490      *
5491      * Both the condition and body model receive (m + k + n) inputs.
5492      * * The first m (m >= 1) inputs are input-output operands. For the first
5493      *   iteration, these are initialized from the corresponding inputs of the
5494      *   WHILE operation. In subsequent iterations, their values come from the
5495      *   corresponding outputs of the body model produced during the previous
5496      *   iteration.
5497      * * The next k (k >= 0) inputs are state-only operands. They are similar to
5498      *   the input-output operands, except that their values are no longer
5499      *   available after the loop terminates.
5500      * * The last n (n >= 0) inputs are input-only operands. Their values come
5501      *   from the corresponding inputs of the WHILE operation.
5502      *
5503      * The body model produces (m + k) outputs.
5504      * * The first m outputs are input-output operands. They become the outputs
5505      *   of the WHILE operation when a termination condition is reached.
5506      * * The last k outputs are state-only operands. Their values are no longer
5507      *   available after the loop terminates.
5508      *
5509      * The numbers m, k, and n are inferred by the runtime as follows:
5510      *     m = (WHILE operation output count)
5511      *     k = (body model output count) - m
5512      *     n = (body model input count) - m - k
5513      *
5514      * The pseudo-code below illustrates the flow of a WHILE operation with
5515      * inputs condition, body, initial_input_output, initial_state, input_only
5516      * (m = 1, k = 1, n = 1):
5517      *
5518      *     input_output = initial_input_output
5519      *     state = initial_state
5520      *     while condition(input_output, state, input_only):
5521      *         input_output, state = body(input_output, state, input_only)
5522      *     return input_output
5523      *
5524      * To prevent infinite loops, there is an implicit execution timeout
5525      * associated with each loop ("loop timeout duration"). See {@link
5526      * ANeuralNetworksExecution_setLoopTimeout}.
5527      *
5528      * Inputs:
5529      * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition
5530      *      model. The model must have (m + k + n) inputs with
5531      *      the same types, ranks (if specified), dimensions (if specified),
5532      *      scales, zeroPoints, and other operand parameters as the
5533      *      corresponding inputs of the WHILE operation and exactly one output
5534      *      of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].
5535      *      The output operand must have fully specified dimensions.
5536      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.
5537      *      The model must have (m + k + n) inputs and (m + k) outputs with
5538      *      the same types, ranks (if specified), dimensions (if specified),
5539      *      scales, zeroPoints, and other operand parameters as the
5540      *      corresponding inputs and outputs of the WHILE operation.
5541      * * (m inputs): Initial values for input-output operands.
5542      * * (k inputs): Initial values for state-only operands.
5543      * * (n inputs): Values for input-only operands.
5544      *
5545      * Outputs:
5546      * * 0 ~ (m - 1): Outputs produced by the loop.
5547      *
5548      * Available since NNAPI feature level 4.
5549      */
5550     ANEURALNETWORKS_WHILE = 97,
5551 
5552     /**
5553      * Computes exponential linear activation on the input tensor element-wise.
5554      *
5555      * The output is calculated using the following formula:
5556      *
5557      *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5558      *
5559      * Supported tensor {@link OperandCode}:
5560      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5561      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5562      *
5563      * Supported tensor rank: from 1.
5564      *
5565      * Inputs:
5566      * * 0: A tensor, specifying the input. May be zero-sized.
5567      * * 1: A scalar, specifying the alpha parameter.
5568      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5569      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
5570      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5571      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
5572      *
5573      * Outputs:
5574      * * 0: The output tensor of same shape and type as input0.
5575      *
5576      * Available since NNAPI feature level 4.
5577      */
5578     ANEURALNETWORKS_ELU = 98,
5579 
5580     /**
5581      * Computes hard-swish activation on the input tensor element-wise.
5582      *
5583      * Hard swish activation is introduced in
5584      * https://arxiv.org/pdf/1905.02244.pdf
5585      *
5586      * The output is calculated using the following formula:
5587      *
5588      *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
5589      *
5590      * Supported tensor {@link OperandCode}:
5591      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5592      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5593      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5594      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5595      *
5596      * Supported tensor rank: from 1.
5597      *
5598      * Inputs:
5599      * * 0: A tensor, specifying the input. May be zero-sized.
5600      *
5601      * Outputs:
5602      * * 0: The output tensor of same shape and type as input0.
5603      *      Scale and zero point of this tensor may be different from the input
5604      *      tensor's parameters.
5605      *
5606      * Available since NNAPI feature level 4.
5607      */
5608     ANEURALNETWORKS_HARD_SWISH = 99,
5609 
5610     /**
5611      * Creates a tensor filled with a scalar value.
5612      *
5613      * Supported output tensor {@link OperandCode}:
5614      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5615      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5616      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5617      *
5618      * Supported tensor rank: from 1.
5619      *
5620      * Inputs:
5621      * * 0: A 1-D tensor, specifying the desired output tensor shape.
5622      * * 1: A scalar, specifying the value to fill the output tensors with.
5623      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5624      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
5625      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5626      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.
5627      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
5628      *      the scalar must be of {@link ANEURALNETWORKS_INT32}.
5629      *
5630      * Outputs:
5631      * * 0: The output tensor.
5632      *
5633      * Available since NNAPI feature level 4.
5634      */
5635     ANEURALNETWORKS_FILL = 100,
5636 
5637     /**
5638      * Returns the rank of a tensor.
5639      *
5640      * The rank of a tensor is the number of dimensions in it. Also known as
5641      * "order", "degree", "ndims".
5642      *
5643      * Supported tensor {@link OperandCode}:
5644      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5645      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5646      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5647      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5648      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5649      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
5650      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
5651      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
5652      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5653      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5654      *
5655      * Supported tensor rank: from 1.
5656      *
5657      * Inputs:
5658      * * 0: The input tensor.
5659      *
5660      * Outputs:
5661      * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank
5662      *      of the input tensor.
5663      *
5664      * Available since NNAPI feature level 4.
5665      */
5666     ANEURALNETWORKS_RANK = 101,
5667 
5668     // Operations below are available since NNAPI feature level 6.
5669 
5670     /**
5671      * Performs multiplication of two tensors in batches.
5672      *
5673      * Multiplies all slices of two input tensors and arranges the individual
5674      * results in a single output tensor of the same batch size. Each pair of
5675      * slices in the same batch have identical {@link OperandCode}. Each
5676      * slice can optionally be adjointed (transpose and conjugate) before
5677      * multiplication.
5678      *
5679      * The two input tensors and the output tensor must be 2-D or higher and
5680      * have the same batch size.
5681      *
5682      * Supported tensor {@link OperandCode}:
5683      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5684      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5685      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5686      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5687      *
5688      * Supported tensor rank: at least 2 and up to 4
5689      *
5690      * Inputs:
5691      * * 0: A tensor with 2-D or higher shape [..., r_x, c_x].
5692      * * 1: A tensor with 2-D or higher shape [..., r_y, c_y]. It has the same
5693      *      {@link OperandCode} and batch size as input0.
5694      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar adj_x, default
5695      *      to false. Set to true to adjoint the slices of input0.
5696      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar adj_y, default
5697      *      to false. Set to true to adjoint the slices of input1.
5698      *
5699      * Outputs:
5700      * * 0: A tensor with 2-D or higher shape [..., r_o, c_o], where
5701      *      r_o = c_x if adj_x else r_x
5702      *      c_o = r_y if adj_y else c_y
5703      *
5704      * Available since NNAPI feature level 6.
5705      */
5706     ANEURALNETWORKS_BATCH_MATMUL = 102,
5707 
5708     /**
5709      * Packs N input tensors (N >= 1) of rank R into one output tensor of rank R+1.
5710      * The tensors are packed along a given axis.
5711      *
5712      * The input tensors must have identical {@link OperandCode} and dimensions.
5713      *
5714      * For example, suppose there are N input tensors of shape (A, B, C).
5715      * If axis is 0, the output tensor will have shape (N, A, B, C).
5716      * If axis is 1, the output tensor will have shape (A, N, B, C).
5717      *
5718      * All dimensions through the axis dimension determine the output tile count;
5719      * the remaining dimensions determine the tile shape.
5720      *
5721      * Return to the example of N input tensors of shape (A, B, C).
5722      * If axis is 0, there are N tiles in the output, each of shape (A, B, C).
5723      * If axis is 1, there are A*N tiles in the output, each of shape (B, C).
5724      *
5725      * The coordinates of a tile within the output tensor are (t[0],...,t[axis]).
5726      * The coordinates of a tile within an input tensor are (t[0],...,t[axis-1]).
5727      * (If axis is 0, an input tensor consists of a single tile.)
5728      * If we index input tensors starting with 0 (rather than by operand number),
5729      * then output_tile[t[0],...,t[axis]] = input_tile[t[axis]][t[0],...,t[axis-1]].
5730      * That is, all output tile coordinates except for the axis coordinate select
5731      * the corresponding location within some input tensor; and the axis coordinate
5732      * selects the input tensor.
5733      *
5734      * Supported tensor {@link OperandCode}:
5735      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5736      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5737      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5738      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5739      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5740      *
5741      * Supported input tensor rank: from 1
5742      *
5743      * Inputs:
5744      * * 0: A scalar of type {@link ANEURALNETWORKS_INT32}, specifying
5745      *      the axis along which to pack.  The valid range is [0, R+1).
5746      * * 1 ~ N: Input tensors to be packed together.
5747      *          For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5748      *          {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensors,
5749      *          the scales and zeroPoint must be the same for all input tensors,
5750      *          and will be the same for the output tensor.
5751      *
5752      * Outputs:
5753      * * 0: The packed tensor.
5754      *
5755      * Available since NNAPI feature level 6.
5756      */
5757     ANEURALNETWORKS_PACK = 103,
5758 
5759     // Operations below are available since NNAPI feature level 7.
5760 
5761     /**
5762      * Pads a tensor with mirrored values.
5763      *
5764      * This operator specifies one of two padding modes: REFLECT or SYMMETRIC.
5765      * In the case of REFLECT mode, the mirroring excludes the border element
5766      * on the padding side.
5767      * In the case of SYMMETRIC mode, the mirroring includes the border element
5768      * on the padding side.
5769      *
5770      * For example, if the input is the 1-D tensor `[1, 2, 3]` and the padding
5771      * is `[0, 2]` (i.e., pad no elements before the first (and only) dimension,
5772      * and two elements after the first (and only) dimension), then:
5773      *     - REFLECT mode produces the output `[1, 2, 3, 2, 1]`
5774      *     - SYMMETRIC mode produces the output `[1, 2, 3, 3, 2]`
5775      *
5776      * Supported tensor {@link OperandCode}:
5777      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5778      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5779      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5780      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5781      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5782      *
5783      * Supported tensor rank: from 1.
5784      *
5785      * Inputs:
5786      * * 0: An n-D tensor, specifying the tensor to be padded.
5787      * * 1: A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
5788      *      for each spatial dimension of the input tensor. The shape of the
5789      *      tensor must be {rank(input0), 2}.
5790      *      padding[i, 0] specifies the number of elements to be padded in the
5791      *      front of dimension i.
5792      *      padding[i, 1] specifies the number of elements to be padded after the
5793      *      end of dimension i.
5794      *      Each padding value must be nonnegative.
5795      *      In the case of REFLECT mode, each padding value must be less than the
5796      *      corresponding dimension.
5797      *      In the case of SYMMETRIC mode, each padding value must be less than or
5798      *      equal to the corresponding dimension.
5799      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the mode.
5800      *      Options are 0:REFLECT and 1:SYMMETRIC.
5801      *
5802      * Outputs:
5803      * * 0: A tensor of the same {@link OperandCode} as input0. The
5804      *      output tensor has the same rank as input0, and each
5805      *      dimension of the output tensor has the same size as the
5806      *      corresponding dimension of the input tensor plus the size
5807      *      of the padding:
5808      *          output0.dimension[i] =
5809      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
5810      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5811      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5812      *      the scale and zeroPoint must be the same as input0.
5813      *
5814      * Available since NNAPI feature level 7.
5815      */
5816     ANEURALNETWORKS_MIRROR_PAD = 104,
5817 
5818     /**
5819      * Reverses a specified dimension of a tensor.
5820      *
5821      * Supported tensor {@link OperandCode}:
5822      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5823      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5824      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5825      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5826      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5827      *
5828      * Supported tensor rank: up to 8.
5829      *
5830      * Inputs:
5831      * * 0: Input tensor of rank n.
5832      * * 1: Axis tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape [1],
5833      *      specifying which dimension of the input tensor is to be reversed. The dimension
5834      *      must be in the range [0, n).
5835      *
5836      * Outputs:
5837      * * 0: The reversed tensor of the same shape as the input tensor.
5838      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5839      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensors,
5840      *      the scales and zeroPoint must be the same as input0.
5841      *
5842      * Available since NNAPI feature level 7.
5843      */
5844     ANEURALNETWORKS_REVERSE = 105,
5845 } OperationCode;
5846 
5847 /**
5848  * Fused activation function types.
5849  *
5850  * Available since NNAPI feature level 1.
5851  */
5852 typedef enum {
5853     /** NO fused activation function. */
5854     ANEURALNETWORKS_FUSED_NONE = 0,
5855     /** Fused ReLU activation function. */
5856     ANEURALNETWORKS_FUSED_RELU = 1,
5857     /** Fused ReLU1 activation function. */
5858     ANEURALNETWORKS_FUSED_RELU1 = 2,
5859     /** Fused ReLU6 activation function. */
5860     ANEURALNETWORKS_FUSED_RELU6 = 3,
5861 } FuseCode;
5862 
5863 /**
5864  * Implicit padding algorithms.
5865  *
5866  *
5867  * Available since NNAPI feature level 1.
5868  */
5869 typedef enum {
5870     /**
5871      * SAME padding.
5872      * Padding on both ends are the "same":
5873      *     padding_to_beginning =  total_padding / 2
5874      *     padding_to_end       = (total_padding + 1)/2.
5875      * i.e., for even number of padding, padding to both ends are exactly
5876      * the same; for odd number of padding, padding to the ending is bigger
5877      * than the padding to the beginning by 1.
5878      *
5879      * total_padding is a function of input, stride, dilation and filter size.
5880      * It could be computed as follows:
5881      *    out_size = (input + stride - 1) / stride
5882      *    effective_filter_size = (filter_size - 1) * dilation + 1
5883      *    needed_input = (out_size - 1) * stride + effective_filter_size
5884      *    total_padding = max(0, needed_input - input_size)
5885      *  The computation is the same for the horizontal and vertical directions.
5886      */
5887     ANEURALNETWORKS_PADDING_SAME = 1,
5888 
5889     /**
5890      * VALID padding.
5891      * No padding. When the input size is not evenly divisible by
5892      * the filter size, the input at the end that could not fill
5893      * the whole filter tile will simply be ignored.
5894      */
5895     ANEURALNETWORKS_PADDING_VALID = 2,
5896 } PaddingCode;
5897 
5898 /**
5899  * Execution preferences.
5900  *
5901  * Available since NNAPI feature level 1.
5902  */
5903 typedef enum {
5904     /**
5905      * Prefer executing in a way that minimizes battery drain.
5906      * This is desirable for compilations that will be executed often.
5907      */
5908     ANEURALNETWORKS_PREFER_LOW_POWER = 0,
5909     /**
5910      * Prefer returning a single answer as fast as possible, even if this causes
5911      * more power consumption.
5912      */
5913     ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
5914     /**
5915      * Prefer maximizing the throughput of successive frames, for example when
5916      * processing successive frames coming from the camera.
5917      */
5918     ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
5919 } PreferenceCode;
5920 
5921 /**
5922  * Device types.
5923  *
5924  * The type of NNAPI device.
5925  */
5926 typedef enum {
5927     /** The device type cannot be provided. */
5928     ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
5929     /** The device does not fall into any category below. */
5930     ANEURALNETWORKS_DEVICE_OTHER = 1,
5931     /** The device runs NNAPI models on single or multi-core CPU. */
5932     ANEURALNETWORKS_DEVICE_CPU = 2,
5933     /** The device can run NNAPI models and also accelerate graphics APIs such
5934      * as OpenGL ES and Vulkan. */
5935     ANEURALNETWORKS_DEVICE_GPU = 3,
5936     /** Dedicated accelerator for Machine Learning workloads. */
5937     ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
5938 } DeviceTypeCode;
5939 
5940 /**
5941  * NNAPI feature levels.
5942  *
5943  * Each update of the NNAPI specification yields a new NNAPI feature level enum value.
5944  * NNAPI feature level corrseponds to an NNAPI specification version that a driver
5945  * and/or the NNAPI runtime can implement.
5946  *
5947  * A feature level up to and including "FEATURE_LEVEL_5" maps directly to
5948  * the Android API level that introduced the corresponding update of the NNAPI
5949  * specification. Feature levels after Android API level 31 have no association with
5950  * API level because the NNAPI specification can be updated between Android API
5951  * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and
5952  * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against
5953  * these enum values instead of the Android API level.
5954  */
5955 typedef enum {
5956     /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */
5957     ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
5958     /** NNAPI specification available in Android P, Android NNAPI feature level 2 */
5959     ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
5960     /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */
5961     ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
5962     /** NNAPI specification available in Android R, Android NNAPI feature level 4 */
5963     ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
5964     /**
5965      * NNAPI specification available in Android S, Android NNAPI feature level 5.
5966      * After Android S, the NNAPI specification can be updated between Android
5967      * API releases.
5968      */
5969     ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
5970     /** Android NNAPI feature level 6 */
5971     ANEURALNETWORKS_FEATURE_LEVEL_6 = 1000006,
5972     /** Android NNAPI feature level 7 */
5973     ANEURALNETWORKS_FEATURE_LEVEL_7 = 1000007,
5974     /** Android NNAPI feature level 8 */
5975     ANEURALNETWORKS_FEATURE_LEVEL_8 = 1000008,
5976 } FeatureLevelCode;
5977 
5978 /**
5979  * Result codes.
5980  *
5981  * <p>Any NNAPI function can return any result code, including result codes not
5982  * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
5983  * indicates a failure of some kind.</p>
5984  *
5985  * <p>Additional information about the nature of a failure can be obtained from
5986  * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
5987  * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
5988  *
5989  * Available since NNAPI feature level 1.
5990  */
5991 typedef enum {
5992     /**
5993      * Operation was successful.
5994      */
5995     ANEURALNETWORKS_NO_ERROR = 0,
5996 
5997     /**
5998      * Failure caused by not enough available memory.
5999      */
6000     ANEURALNETWORKS_OUT_OF_MEMORY = 1,
6001 
6002     ANEURALNETWORKS_INCOMPLETE = 2,
6003 
6004     /**
6005      * Failure caused by unexpected null argument.
6006      */
6007     ANEURALNETWORKS_UNEXPECTED_NULL = 3,
6008 
6009     /**
6010      * Failure caused by invalid function arguments, invalid model definition,
6011      * invalid execution definition or invalid data at execution time.
6012      */
6013     ANEURALNETWORKS_BAD_DATA = 4,
6014 
6015     /**
6016      * Failure caused by failed model execution.
6017      */
6018     ANEURALNETWORKS_OP_FAILED = 5,
6019 
6020     /**
6021      * Failure caused by object being in the wrong state.
6022      */
6023     ANEURALNETWORKS_BAD_STATE = 6,
6024 
6025     /**
6026      * Failure caused by not being able to map a file into memory.
6027      * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
6028      * not supported by the device.
6029      * Mitigate by reading its content into memory.
6030      */
6031     ANEURALNETWORKS_UNMAPPABLE = 7,
6032 
6033     /**
6034      * Failure caused by insufficient buffer size provided to a model output.
6035      */
6036     ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
6037 
6038     /**
6039      * Failure caused by a device not being available.
6040      */
6041     ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
6042 
6043     /**
6044      * Failure because a deadline could not be met for a task, but future
6045      * deadlines may still be met for the same task after a short delay.
6046      *
6047      * Available since NNAPI feature level 4.
6048      */
6049     ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,
6050 
6051     /**
6052      * Failure because a deadline could not be met for a task, and future
6053      * deadlines will likely also not be met for the same task even after a
6054      * short delay.
6055      *
6056      * Available since NNAPI feature level 4.
6057      */
6058     ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,
6059 
6060     /**
6061      * Failure because of a resource limitation within the driver, but future
6062      * calls for the same task may still succeed after a short delay.
6063      *
6064      * Available since NNAPI feature level 4.
6065      */
6066     ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,
6067 
6068     /**
6069      * Failure because of a resource limitation within the driver, and future
6070      * calls for the same task will likely also fail even after a short
6071      * delay.
6072      *
6073      * Available since NNAPI feature level 4.
6074      */
6075     ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,
6076 
6077     /**
6078      * Failure indicating an object is in a dead state.
6079      *
6080      * Available since NNAPI feature level 4.
6081      */
6082     ANEURALNETWORKS_DEAD_OBJECT = 14,
6083 } ResultCode;
6084 
6085 /**
6086  * For {@link ANeuralNetworksModel_setOperandValue}, values with a
6087  * length smaller or equal to this will be immediately copied into
6088  * the model. The size is in bytes.
6089  *
6090  * Available since NNAPI feature level 1.
6091  */
6092 enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };
6093 
6094 /**
6095  * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
6096  * of the cache token required from the application. The size is in bytes.
6097  *
6098  * Available since NNAPI feature level 3.
6099  */
6100 enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };
6101 
6102 /**
6103  * Different duration measurements.
6104  *
6105  * Durations are measured in nanoseconds.
6106  *
6107  * Available since NNAPI feature level 3.
6108  */
6109 typedef enum {
6110     // Execution time on hardware (not driver, which runs on host processor).
6111     ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
6112     // Execution time in driver (including time on hardware).  Excludes overhead
6113     // such as that of the runtime itself and the IPC needed for the runtime to
6114     // communicate with the driver.
6115     ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
6116     // Execution time on hardware, after all dependencies have been signaled.
6117     // If no dependencies specified (for example, if the execution was scheduled other
6118     // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
6119     // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
6120     // Available since NNAPI feature level 4.
6121     ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
6122     // Execution time in driver, after all dependencies have been signaled. Excludes
6123     // overhead such as that of the runtime itself and the IPC needed for the runtime
6124     // to communicate with the driver.
6125     // If no dependencies specified (for example, if the execution was scheduled other
6126     // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
6127     // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
6128     // Available since NNAPI feature level 4.
6129     ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
6130 } DurationCode;
6131 
6132 /**
6133  * Relative execution priority.
6134  *
6135  * Available since NNAPI feature level 4.
6136  */
6137 typedef enum {
6138     ANEURALNETWORKS_PRIORITY_LOW = 90,
6139     ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
6140     ANEURALNETWORKS_PRIORITY_HIGH = 110,
6141     ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
6142 } PriorityCode;
6143 
6144 /**
6145  * ANeuralNetworksMemory is an opaque type that represents memory.
6146  *
6147  * This type is used to represent shared memory, memory mapped files,
6148  * and similar memories.
6149  *
6150  * By using shared memory, a program can efficiently communicate to the
6151  * runtime and drivers the tensors that define a model. See
6152  * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
6153  * should typically create one shared memory object that contains every constant tensor
6154  * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
6155  * create shared memory from a file handle.
6156  * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
6157  * create shared memory from an AHardwareBuffer handle.
6158  *
6159  * Memory objects can also be used to specify the input and output arguments of
6160  * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
6161  * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
6162  *
6163  * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
6164  * {@link ANeuralNetworksExecution_setInputFromMemory} and
6165  * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
6166  * memory object must be aligned on a boundary of a byte size that is a multiple
6167  * of the element type byte size, e.g., a tensor with
6168  * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
6169  *
6170  * It is the application's responsibility to ensure that there are no uses of
6171  * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
6172  * any model which references this memory because of a call to
6173  * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
6174  * created using such a model, any execution object or burst object created
6175  * using such a compilation, or any execution which references this memory
6176  * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
6177  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
6178  *
6179  * Available since NNAPI feature level 1.
6180  *
6181  * Starting at NNAPI feature level 4, the application may request creation of device native memory
6182  * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation
6183  * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and
6184  * {@link ANeuralNetworksMemory_createFromDesc}.
6185  */
6186 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
6187 
6188 /**
6189  * ANeuralNetworksModel is an opaque type that contains a description of the
6190  * mathematical operations that constitute the model.
6191  *
6192  * <p>Build the model by calling<ul>
6193  * <li>{@link ANeuralNetworksModel_create}</li>
6194  * <li>{@link ANeuralNetworksModel_addOperation}</li>
6195  * <li>{@link ANeuralNetworksModel_addOperand}</li>
6196  * </ul>
6197  *
6198  * This forms a graph in which each operation and operand is a node, a
6199  * directed edge from an operand to an operation indicates that the
6200  * operand is an input to the operation, and a directed edge from an
6201  * operation to an operand indicates that the operand is an output
6202  * from the operation. This graph must be acyclic.
6203  *
6204  * A model is completed by calling {@link ANeuralNetworksModel_finish}.
6205  * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
6206  *
6207  * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
6208  * has been called on it.</p>
6209  *
6210  * <p>It is the application's responsibility to make sure that only one thread
6211  * modifies a model at a given time. It is however safe for more than one
6212  * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
6213  *
6214  * <p>It is also the application's responsibility to ensure that there are no
6215  * other uses of the model after calling {@link ANeuralNetworksModel_free}.
6216  * This includes any compilation, execution object or burst object created using
6217  * the model.</p>
6218  *
6219  * Available since NNAPI feature level 1.
6220  */
6221 typedef struct ANeuralNetworksModel ANeuralNetworksModel;
6222 
6223 /**
6224  * ANeuralNetworksCompilation is an opaque type that can be used to compile
6225  * a machine learning model.
6226  *
6227  * <p>To use:<ul>
6228  *    <li>Create a new compilation instance by calling the
6229  *        {@link ANeuralNetworksCompilation_create} function or
6230  *        {@link ANeuralNetworksCompilation_createForDevices}.</li>
6231  *    <li>Set any desired properties on the compilation (for example,
6232  *        {@link ANeuralNetworksCompilation_setPreference}).</li>
6233  *    <li>Optionally, set the caching signature and the cache directory on the
6234  *        compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li>
6235  *    <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
6236  *    <li>Use the compilation as many times as needed
6237  *        with {@link ANeuralNetworksExecution_create} and
6238  *        {@link ANeuralNetworksBurst_create}.</li>
6239  *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
6240  *        once all executions using the compilation have completed.</li></ul></p>
6241  *
6242  * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
6243  * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
6244  *
6245  * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
6246  * has been called on it.</p>
6247  *
6248  * <p>It is the application's responsibility to make sure that only
6249  * one thread modifies a compilation at a given time. It is however
6250  * safe for more than one thread to use the compilation once
6251  * {@link ANeuralNetworksCompilation_finish} has returned.</p>
6252  *
6253  * <p>It is also the application's responsibility to ensure that there are no other
6254  * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
6255  * This includes any execution object or burst object created using the compilation,
6256  * or any memory descriptor with the compilation as part of one of the roles specified by
6257  * {@link ANeuralNetworksMemoryDesc_addInputRole} or
6258  * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p>
6259  *
6260  * Available since NNAPI feature level 1.
6261  */
6262 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
6263 
6264 /**
6265  * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
6266  * learning model to a set of inputs.
6267  *
6268  * <p>To use:<ul>
6269  *    <li>Create a new execution instance by calling the
6270  *        {@link ANeuralNetworksExecution_create} function.</li>
6271  *    <li>Associate input buffers or memory regions to the model inputs with
6272  *        {@link ANeuralNetworksExecution_setInput} or
6273  *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
6274  *    <li>Associate output buffers or memory regions to the model outputs with
6275  *        {@link ANeuralNetworksExecution_setOutput} or
6276  *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
6277  *    <li>Optionally, configure the execution with
6278  *        {@link ANeuralNetworksExecution_setLoopTimeout},
6279  *        {@link ANeuralNetworksExecution_setMeasureTiming},
6280  *        {@link ANeuralNetworksExecution_setReusable}, or
6281  *        {@link ANeuralNetworksExecution_setTimeout}.</li>
6282  *    <li>Apply the model with one of the following:</li><ul>
6283  *        <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
6284  *            or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
6285  *            waiting for the execution to complete with
6286  *            {@link ANeuralNetworksEvent_wait}.</li>
6287  *        <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
6288  *        <li>Synchronously as part of an execution burst with
6289  *            {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
6290  *        If the execution has been marked as reusable, then you can
6291  *        apply the model more than once.
6292  *    <li>Destroy the execution with
6293  *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
6294  *
6295  * <p>An output buffer or memory region must not overlap with any
6296  * other output buffer or memory region, with an input buffer or
6297  * memory region, or with an operand value in a memory object
6298  * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
6299  *
6300  * <p>An execution is in the preparation state after it is created by
6301  * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation
6302  * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute},
6303  * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute},
6304  * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of
6305  * the execution object to the computation state. When the computation completes, the state of
6306  * the execution object will change from the computation state to the completed state.
6307  * The computation is completed when {@link ANeuralNetworksExecution_compute},
6308  * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait}
6309  * has returned.</p>
6310  *
6311  * <p>An execution can be applied to a model with
6312  * {@link ANeuralNetworksExecution_burstCompute},
6313  * {@link ANeuralNetworksExecution_compute},
6314  * {@link ANeuralNetworksExecution_startCompute} or
6315  * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
6316  * executions to do new evaluations of the model.</p>
6317  *
6318  * <p>Starting at NNAPI feature level 5, the application may call
6319  * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple
6320  * computations. The application may schedule and evaluate a computation again from the completed
6321  * state of a reusable execution. The execution cannot be modified between computations.</p>
6322  *
6323  * <p>It is the application's responsibility to make sure that only one thread
6324  * modifies an execution at a given time. It is however safe for more than one
6325  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
6326  *
6327  * <p>It is also the application's responsibility to ensure that the execution
6328  * either has never been scheduled or has completed (i.e., that
6329  * {@link ANeuralNetworksExecution_burstCompute},
6330  * {@link ANeuralNetworksExecution_compute}, or
6331  * {@link ANeuralNetworksEvent_wait} has returned) before calling
6332  * {@link ANeuralNetworksExecution_free}.</p>
6333  *
6334  * <p>It is also the application's responsibility to ensure that there are no other
6335  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
6336  *
6337  * <p>It is the application's responsibility to ensure that there are no concurrent computations
6338  * scheduled and evaluated on the same execution, either by means of
6339  * {@link ANeuralNetworksExecution_compute} or
6340  * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous)
6341  * in different threads, or by means of
6342  * {@link ANeuralNetworksExecution_startCompute} or
6343  * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
6344  * It is however safe to schedule and evaluate multiple computations on different executions
6345  * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
6346  * different burst objects.) The runtime makes no guarantee on the ordering of
6347  * completion of executions. If it's important to the application, the
6348  * application should enforce the ordering by ensuring that one execution
6349  * completes before the next is scheduled (for example, by scheduling all
6350  * executions synchronously within a single thread, or by scheduling all
6351  * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
6352  * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
6353  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
6354  * list of events to be signaled before starting the actual evaluation.</p>
6355  *
6356  * Available since NNAPI feature level 1.
6357  */
6358 typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
6359 
6360 /**
6361  * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
6362  */
6363 typedef struct ANeuralNetworksSymmPerChannelQuantParams {
6364     /** The index of the channel dimension, i.e. the dimension along which the scales vary. */
6365     uint32_t channelDim;
6366     /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */
6367     uint32_t scaleCount;
6368     /** The array of scaling values for each channel. Each value must be greater than zero. */
6369     const float* scales;
6370 } ANeuralNetworksSymmPerChannelQuantParams;
6371 
6372 /**
6373  * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
6374  * of a rapid sequence of executions. It will likely cause overhead if only used
6375  * for a single execution.
6376  *
6377  * ANeuralNetworksBurst serves as a context object for any number of inferences
6378  * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
6379  * object and the {@link ANeuralNetworksExecution} objects used with it must all
6380  * have been created from the same {@link ANeuralNetworksCompilation} object.
6381  *
6382  * This object is also used as a hint to drivers, providing insight to the
6383  * lifetime of a rapid sequence of executions. For example, a driver may choose
6384  * to increase the clock frequency of its accelerator for the lifetime of a
6385  * burst object.
6386  *
6387  * <p>To use:<ul>
6388  *    <li>Create a new burst object by calling the
6389  *        {@link ANeuralNetworksBurst_create} function.</li>
6390  *    <li>For each execution:</li><ul>
6391  *        <li>Create {@link ANeuralNetworksExecution} and configure its
6392  *            properties (see {@link ANeuralNetworksExecution} for details).</li>
6393  *        <li>Apply the model synchronously with
6394  *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
6395  *            {@link ANeuralNetworksBurst} with the new
6396  *            {@link ANeuralNetworksExecution}.</li>
6397  *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
6398  *    <li>Destroy the burst with
6399  *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
6400  *
6401  * Available since NNAPI feature level 3.
6402  */
6403 typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
6404 
6405 /**
6406  * ANeuralNetworksOperandType describes the type of an operand.
6407  *
6408  * This structure is used to describe both scalars and tensors.
6409  *
6410  * A tensor operand type with all dimensions specified is "fully
6411  * specified".  Whenever possible (i.e., whenever the dimensions are
6412  * known at model construction time), a tensor operand type should be
6413  * (but is not required to be) fully specified, in order to enable the
6414  * best possible performance.
6415  *
6416  * If a tensor operand's type is not fully specified, the dimensions
6417  * of the operand are deduced from the operand types and values of the
6418  * operation for which that operand is an output or from the corresponding
6419  * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
6420  * operand type in the case of referenced model input operands.
6421  *
6422  * <p>In the following situations, a tensor operand type must be fully
6423  * specified:<ul>
6424  *     <li>The operand has a constant value, set by
6425  *         {@link ANeuralNetworksModel_setOperandValue} (with a
6426  *         non-nullptr buffer) or
6427  *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
6428  *     <li>The operand is a model input (see
6429  *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6430  *         model within a compilation.  A fully specified tensor operand type
6431  *         must either be provided to {@link ANeuralNetworksModel_addOperand};
6432  *         or it must be provided to the corresponding
6433  *         {@link ANeuralNetworksExecution_setInput}, or
6434  *         {@link ANeuralNetworksExecution_setInputFromMemory}.
6435  *         EXCEPTION: If the input is optional and omitted
6436  *         (by passing nullptr for buffer to
6437  *         {@link ANeuralNetworksExecution_setInput}) then it need
6438  *         not have a fully specified tensor operand type.</li>
6439  *     <li>The operand is a model output (see
6440  *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
6441  *         model within a compilation and is to be used with {@link
6442  *         ANeuralNetworksExecution_startComputeWithDependencies}.
6443  *         A fully specified tensor operand type must either be provided
6444  *         to {@link ANeuralNetworksModel_addOperand}; or it must be
6445  *         provided to the corresponding
6446  *         {@link ANeuralNetworksExecution_setOutput}, or
6447  *         {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
6448  *
6449  * A tensor operand type of specified rank but some number of
6450  * unspecified dimensions is represented by setting dimensionCount to
6451  * the rank and each unspecified dimension to 0.
6452  *
6453  * Available since NNAPI feature level 1.
6454  *
6455  * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is
6456  * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
6457  * it were a scalar operand type).
6458  */
6459 typedef struct ANeuralNetworksOperandType {
6460     /**
6461      * The data type, e.g. ANEURALNETWORKS_FLOAT32.
6462      */
6463     int32_t type;
6464 
6465     /**
6466      * The number of dimensions (rank).
6467      *
6468      * Must be 0 for scalars.
6469      */
6470     uint32_t dimensionCount;
6471 
6472     /**
6473      * The dimensions of the tensor (an array of dimensionCount entries).
6474      *
6475      * Must be nullptr for scalars.
6476      */
6477     const uint32_t* dimensions;
6478 
6479     /**
6480      * The quantization scale.
6481      *
6482      * Must be 0 when not applicable to an operand type.
6483      *
6484      * See {@link OperandCode}.
6485      */
6486     float scale;
6487 
6488     /**
6489      * The quantization zero point.
6490      *
6491      * Must be 0 when not applicable to an operand type.
6492      *
6493      * See {@link OperandCode}.
6494      */
6495     int32_t zeroPoint;
6496 } ANeuralNetworksOperandType;
6497 
6498 /**
6499  * Aliasing to {@link OperationCode}, used in function
6500  * {@link ANeuralNetworksModel_addOperation}.
6501  */
6502 typedef int32_t ANeuralNetworksOperationType;
6503 
6504 /**
6505  * ANeuralNetworksEvent is an opaque type that represents an event
6506  * that will be signaled once an execution completes.
6507  *
6508  * Available since NNAPI feature level 1.
6509  */
6510 typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
6511 
6512 /**
6513  * ANeuralNetworksDevice is an opaque type that represents a device.
6514  *
6515  * This type is used to query basic properties and supported operations of the corresponding
6516  * device, and control which device(s) a model is to be run on.
6517  *
6518  * Available since NNAPI feature level 3.
6519  */
6520 typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
6521 
6522 /**
6523  * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
6524  *
6525  * A memory descriptor describes the properties of a memory object, and is used by
6526  * {@link ANeuralNetworksMemory_createFromDesc}.
6527  *
6528  * To use:
6529  *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
6530  *   - Specify all of the intended input and output roles by calling
6531  *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
6532  *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
6533  *   - Optionally, specify the memory dimensions by calling
6534  *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
6535  *   - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}.
6536  *   - Use the memory descriptor as many times as needed with
6537  *     {@link ANeuralNetworksMemory_createFromDesc}.
6538  *   - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}.
6539  *
6540  * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}.
6541  * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}.
6542  *
6543  * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish}
6544  * has been called on it.
6545  *
6546  * It is the application's responsibility to make sure that only
6547  * one thread modifies a memory descriptor at a given time. It is however
6548  * safe for more than one thread to use the memory descriptor once
6549  * {@link ANeuralNetworksMemoryDesc_finish} has returned.
6550  *
6551  * It is also the application's responsibility to ensure that there are no other
6552  * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}.
6553  * It is however safe to continue using a {@link ANeuralNetworksMemory} object created
6554  * from the memory descriptor.
6555  *
6556  * Available since NNAPI feature level 4.
6557  */
6558 typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
6559 
6560 __END_DECLS
6561 
6562 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_NEURAL_NETWORKS_TYPES_H
6563 
6564 /** @} */
6565